|
# Leaky Subscriber Example
#
# This example demonstrates the "leaky subscriber" feature, which allows
# slow consumers to drop old messages rather than blocking fast producers.
#
# Scenario:
# - A fast publisher produces messages at ~10 Hz (every 100ms)
# - A slow subscriber processes messages at ~1 Hz (1000ms per message)
# - Without leaky mode: the publisher would be blocked by backpressure
# - With leaky mode: old messages are dropped, subscriber always gets recent data
#
# This is useful for real-time applications where you want the latest data
# rather than processing a growing backlog of stale messages.
| 14 | + |
import asyncio
import typing
from dataclasses import dataclass, field

import ezmsg.core as ez
| 21 | + |
| 22 | + |
@dataclass
class TimestampedMessage:
    """A message with a sequence number and timestamp for tracking latency.

    ``created_at`` defaults to the running event loop's monotonic clock, so
    it is only meaningful to compare against times taken from the same loop
    (i.e. within the same process).
    """

    # Monotonically increasing sequence number assigned by the publisher.
    seq: int
    # asyncio.get_running_loop() is the modern, non-deprecated accessor; it
    # behaves identically to get_event_loop() here because messages are only
    # constructed inside a running coroutine (FastPublisher.publish).
    created_at: float = field(
        default_factory=lambda: asyncio.get_running_loop().time()
    )
| 29 | + |
| 30 | + |
class FastPublisherSettings(ez.Settings):
    """Settings for FastPublisher: message count and publish rate."""

    # Total number of messages to publish before completing.
    num_messages: int = 20
    # Delay between consecutive messages, in seconds (0.1 s => ~10 Hz).
    publish_interval_sec: float = 0.1  # 10 Hz
| 34 | + |
| 35 | + |
class FastPublisher(ez.Unit):
    """Emits TimestampedMessage objects at roughly 10 Hz until done."""

    SETTINGS = FastPublisherSettings

    OUTPUT = ez.OutputStream(TimestampedMessage, num_buffers=32)

    @ez.publisher(OUTPUT)
    async def publish(self) -> typing.AsyncGenerator:
        """Yield one timestamped message per interval, then signal completion."""
        # Hoist settings lookups; the downstream leaky subscriber may drop
        # some of these messages if it cannot keep up.
        total = self.SETTINGS.num_messages
        pause = self.SETTINGS.publish_interval_sec

        seq = 0
        while seq < total:
            print(f"[Publisher] Sending seq={seq}", flush=True)
            yield self.OUTPUT, TimestampedMessage(seq=seq)
            await asyncio.sleep(pause)
            seq += 1

        print("[Publisher] Done sending all messages", flush=True)
        # ez.Complete tells the framework this publisher has finished its work.
        raise ez.Complete
| 54 | + |
| 55 | + |
class SlowSubscriberSettings(ez.Settings):
    """Settings for SlowSubscriber: processing delay and expected total."""

    # Per-message processing time in seconds (1.0 s => ~1 Hz consumer).
    process_time_sec: float = 1.0  # Simulates slow processing at ~1 Hz
    # Number of messages the publisher is expected to send; the subscriber
    # terminates after seeing the final sequence number.
    expected_messages: int = 20
| 59 | + |
| 60 | + |
class SlowSubscriberState(ez.State):
    """Mutable per-run state accumulated by SlowSubscriber."""

    # Sequence numbers actually processed; assigned a fresh list in
    # SlowSubscriber.initialize().
    # NOTE(review): declared without a default — presumably ez.State tolerates
    # unset fields until initialize() runs; confirm against ezmsg docs.
    received_seqs: list
    # Count of messages processed so far.
    received_count: int = 0
    # Running sum of observed latencies in ms; averaged in shutdown().
    total_latency: float = 0.0
| 65 | + |
| 66 | + |
class SlowSubscriber(ez.Unit):
    """
    A slow subscriber that takes 1 second to process each message.

    Uses a leaky InputStream to drop old messages when it can't keep up,
    ensuring it always processes relatively recent data.
    """

    SETTINGS = SlowSubscriberSettings
    STATE = SlowSubscriberState

    # Leaky input stream; oldest messages are dropped when the subscriber lags
    INPUT = ez.InputStream(TimestampedMessage, leaky=True)

    async def initialize(self) -> None:
        # Mutable state must be created per-run, never as a shared class default.
        self.STATE.received_seqs = []

    @ez.subscriber(INPUT)
    async def on_message(self, msg: TimestampedMessage) -> None:
        """Record latency statistics for msg, then simulate slow processing."""
        # get_running_loop() is the non-deprecated accessor inside a coroutine
        # and returns the same loop (and clock) get_event_loop() did here.
        now = asyncio.get_running_loop().time()
        # NOTE(review): msg.created_at is event-loop time from wherever the
        # publisher runs; this latency is only meaningful if both units share
        # an event-loop clock (same process) — confirm for multiprocess runs.
        latency_ms = (now - msg.created_at) * 1000

        self.STATE.received_count += 1
        self.STATE.total_latency += latency_ms
        self.STATE.received_seqs.append(msg.seq)

        print(
            f"[Subscriber] Processing seq={msg.seq:3d}, latency={latency_ms:6.0f}ms",
            flush=True,
        )

        # Simulate slow processing
        await asyncio.sleep(self.SETTINGS.process_time_sec)

        # Terminate after receiving the last message. The final message is
        # assumed to survive the leaky queue because nothing is published
        # after it, so no newer message can displace it.
        if msg.seq == self.SETTINGS.expected_messages - 1:
            raise ez.NormalTermination

    async def shutdown(self) -> None:
        """Print a summary of received/dropped counts and average latency."""
        dropped = self.SETTINGS.expected_messages - self.STATE.received_count
        # Guard against division by zero if no messages were ever received.
        avg_latency = (
            self.STATE.total_latency / self.STATE.received_count
            if self.STATE.received_count > 0
            else 0
        )

        print("\n" + "=" * 60, flush=True)
        print("LEAKY SUBSCRIBER SUMMARY", flush=True)
        print("=" * 60, flush=True)
        print(f"  Messages published: {self.SETTINGS.expected_messages}", flush=True)
        print(f"  Messages received:  {self.STATE.received_count}", flush=True)
        print(f"  Messages dropped:   {dropped}", flush=True)
        print(f"  Sequences received: {self.STATE.received_seqs}", flush=True)
        print(f"  Average latency:    {avg_latency:.0f}ms", flush=True)
        print("=" * 60, flush=True)
        print(
            "\nNote: With leaky=True, the subscriber drops old messages to stay\n"
            "      current. Without it, backpressure would slow the publisher.",
            flush=True,
        )
| 127 | + |
| 128 | + |
class LeakyDemo(ez.Collection):
    """Demo system wiring a fast publisher to a slow, leaky subscriber."""

    SETTINGS = FastPublisherSettings

    PUB = FastPublisher()
    SUB = SlowSubscriber()

    def configure(self) -> None:
        """Forward collection settings to both units before the run starts."""
        # The publisher mirrors the collection-level settings directly; the
        # subscriber's expected count must match the publisher's message count.
        self.PUB.apply_settings(
            FastPublisherSettings(
                num_messages=self.SETTINGS.num_messages,
                publish_interval_sec=self.SETTINGS.publish_interval_sec,
            )
        )
        self.SUB.apply_settings(
            SlowSubscriberSettings(
                process_time_sec=1.0,
                expected_messages=self.SETTINGS.num_messages,
            )
        )

    def network(self) -> ez.NetworkDefinition:
        """Connect the publisher's output to the subscriber's leaky input."""
        return ((self.PUB.OUTPUT, self.SUB.INPUT),)
| 151 | + |
| 152 | + |
if __name__ == "__main__":
    print("Leaky Subscriber Demo", flush=True)
    print("=" * 60, flush=True)
    print("Publisher:  20 messages at 10 Hz (100ms intervals)", flush=True)
    print("Subscriber: Processes at 1 Hz (1000ms per message)", flush=True)
    # Fixed: the banner previously claimed "max_queue=3", but the code above
    # configures only leaky=True on the InputStream (and num_buffers=32 on the
    # OutputStream); describe what is actually configured.
    print("Queue: leaky=True (oldest messages dropped)", flush=True)
    print("=" * 60, flush=True)
    print("\nExpected behavior:", flush=True)
    print("- Publisher sends 20 messages over ~2 seconds", flush=True)
    print("- Subscriber can only process ~1 message per second", flush=True)
    print("- Many messages will be dropped to keep subscriber current", flush=True)
    print("=" * 60 + "\n", flush=True)

    settings = FastPublisherSettings(num_messages=20, publish_interval_sec=0.1)
    system = LeakyDemo(settings)
    ez.run(DEMO=system)