Merge pull request #236 from pikers/fsp_drunken_alignment
Fsp drunken alignment
goodboy authored Nov 3, 2021
2 parents 0a54ed7 + cbec7df commit 186d221
Showing 6 changed files with 452 additions and 235 deletions.
14 changes: 9 additions & 5 deletions piker/data/_sampling.py
@@ -246,7 +246,7 @@ async def sample_and_broadcast(
if tick_throttle:
# this is a send mem chan that likely
# pushes to the ``uniform_rate_send()`` below.
await stream.send(quote)
await stream.send((sym, quote))

else:
await stream.send({sym: quote})
@@ -285,10 +285,14 @@ async def uniform_rate_send(

sleep_period = 1/rate - 0.000616
last_send = time.time()
aname = stream._ctx.chan.uid[0]
fsp = False
if 'fsp' in aname:
fsp = True

while True:

first_quote = await quote_stream.receive()
sym, first_quote = await quote_stream.receive()
start = time.time()

# append quotes since last iteration into the last quote's
@@ -301,7 +305,7 @@
#
while True:
try:
next_quote = quote_stream.receive_nowait()
sym, next_quote = quote_stream.receive_nowait()
ticks = next_quote.get('ticks')

if ticks:
@@ -312,12 +316,12 @@
rate = 1 / (now - last_send)
last_send = now

# print(f'{rate} Hz sending quotes') # \n{first_quote}')
# log.info(f'{rate} Hz sending quotes') # \n{first_quote}')

# TODO: now if only we could sync this to the display
# rate timing exactly lul
try:
await stream.send({first_quote['symbol']: first_quote})
await stream.send({sym: first_quote})
break
except trio.ClosedResourceError:
# if the feed consumer goes down then drop
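The change above threads the symbol through as a ``(sym, quote)`` tuple so ``uniform_rate_send()`` no longer has to dig it back out of the quote dict. As a rough illustration of the throttling pattern only (not the piker implementation): the sketch below assumes a single-symbol ``trio`` memory channel and a hypothetical downstream ``send`` object exposing an async ``send()`` method.

import time

import trio


async def throttled_forward(
    quote_stream: trio.MemoryReceiveChannel,  # yields (sym, quote) tuples
    send,  # any object with an async ``.send()`` method
    rate: float = 6,  # max sends per second
) -> None:
    '''Forward quotes at most ``rate`` times a second, folding ticks
    that queued up since the last send into a single batch.

    '''
    period = 1 / rate

    while True:
        # block until at least one quote arrives for this cycle
        sym, first_quote = await quote_stream.receive()
        start = time.time()

        # non-blocking drain: merge ticks from any quotes queued since
        while True:
            try:
                sym, next_quote = quote_stream.receive_nowait()
                ticks = next_quote.get('ticks')
                if ticks:
                    first_quote.setdefault('ticks', []).extend(ticks)
            except trio.WouldBlock:
                break

        await send.send({sym: first_quote})

        # sleep off whatever remains of this period
        elapsed = time.time() - start
        if elapsed < period:
            await trio.sleep(period - elapsed)

The real ``uniform_rate_send()`` above additionally tracks the achieved send rate and catches ``trio.ClosedResourceError`` so a vanished consumer simply drops the feed.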
57 changes: 49 additions & 8 deletions piker/data/_sharedmem.py
@@ -31,7 +31,7 @@
import numpy as np

from ..log import get_logger
from ._source import base_ohlc_dtype, base_iohlc_dtype
from ._source import base_iohlc_dtype


log = get_logger(__name__)
@@ -168,6 +168,7 @@ def __init__(

self._len = len(shmarr)
self._shm = shm
self._post_init: bool = False

# pushing data does not write the index (aka primary key)
self._write_fields = list(shmarr.dtype.fields.keys())[1:]
@@ -196,7 +197,24 @@ def index(self) -> int:

@property
def array(self) -> np.ndarray:
return self._array[self._first.value:self._last.value]
'''Return an up-to-date ``np.ndarray`` view of the
so-far-written data to the underlying shm buffer.
'''
a = self._array[self._first.value:self._last.value]

# first, last = self._first.value, self._last.value
# a = self._array[first:last]

# TODO: eventually comment this once we've not seen it in the
# wild in a long time..
# XXX: race where first/last indexes cause a reader
# to load an empty array..
if len(a) == 0 and self._post_init:
raise RuntimeError('Empty array race condition hit!?')
# breakpoint()

return a

def last(
self,
@@ -209,6 +227,7 @@ def push(
data: np.ndarray,

prepend: bool = False,
start: Optional[int] = None,

) -> int:
'''Ring buffer like "push" to append data
@@ -217,24 +236,41 @@
NB: no actual ring logic yet to give a "loop around" on overflow
condition, lel.
'''
self._post_init = True
length = len(data)
index = start or self._last.value

if prepend:
index = self._first.value - length
else:
index = self._last.value

if index < 0:
raise ValueError(
f'Array size of {self._len} was overrun during prepend.\n'
f'You have passed {abs(index)} too many datums.'
)

end = index + length

fields = self._write_fields

try:
self._array[fields][index:end] = data[fields][:]

# NOTE: there was a race here between updating
# the first and last indices and when the next reader
# tries to access ``.array`` (which due to the index
# overlap will be empty). Pretty sure we've fixed it now
# but leaving this here as a reminder.
if prepend:
assert index < self._first.value

if index < self._first.value:
self._first.value = index
else:
self._last.value = end

return end

except ValueError as err:
# should raise if diff detected
self.diff_err_fields(data)
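The ``push()`` rework above adds an optional explicit ``start`` index, a bounds check for prepends, and only moves the ``_first``/``_last`` tokens after the data copy, which is what closes the reader race noted in the ``.array`` property. A stripped-down, single-process model of that cursor bookkeeping (plain ``numpy`` with ordinary ints standing in for the shared-memory tokens, not the real ``ShmArray``) might look like:

from typing import Optional

import numpy as np


class RingCursors:
    '''Toy append/prepend buffer bracketed by first/last cursors.

    '''
    def __init__(self, size: int, dtype, start: int):
        self._array = np.zeros(size, dtype=dtype)
        self._len = size
        # both cursors begin mid-buffer so history can be prepended
        self._first = start
        self._last = start
        self._post_init = False

    @property
    def array(self) -> np.ndarray:
        a = self._array[self._first:self._last]
        if len(a) == 0 and self._post_init:
            # data was pushed yet the window is empty: the race the
            # guard in ``.array`` above is meant to surface loudly
            raise RuntimeError('Empty array race condition hit!?')
        return a

    def push(
        self,
        data: np.ndarray,
        prepend: bool = False,
        start: Optional[int] = None,
    ) -> int:
        self._post_init = True
        length = len(data)
        # explicit ``None`` check so an index of 0 is still honored
        index = start if start is not None else self._last

        if prepend:
            index = self._first - length
            if index < 0:
                raise ValueError(
                    f'Array size of {self._len} was overrun during prepend.\n'
                    f'You have passed {abs(index)} too many datums.'
                )

        end = index + length
        self._array[index:end] = data[:]

        # advance the cursors only *after* the copy so a reader slicing
        # ``[first:last]`` sees either the old window or the new one,
        # never a half-updated (empty) range
        if index < self._first:
            self._first = index
        else:
            self._last = end

        return end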
@@ -290,20 +326,25 @@ def flush(self) -> None:


# how much is probably dependent on lifestyle
_secs_in_day = int(60 * 60 * 12)
_default_size = 2 * _secs_in_day
_secs_in_day = int(60 * 60 * 24)
# we try for 3 times but only on a run-every-other-day kinda week.
_default_size = 3 * _secs_in_day


def open_shm_array(

key: Optional[str] = None,
size: int = _default_size,
dtype: Optional[np.dtype] = None,
readonly: bool = False,

) -> ShmArray:
"""Open a memory shared ``numpy`` using the standard library.
'''Open a memory shared ``numpy`` using the standard library.
This call unlinks (aka permanently destroys) the buffer on teardown
and thus should be used from the parent-most accessor (process).
"""
'''
# create new shared mem segment for which we
# have write permission
a = np.zeros(size, dtype=dtype)
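For context on what ``open_shm_array()`` wraps: below is a generic sketch of allocating a shared ``numpy`` buffer with the standard library and destroying it from the owning process. The segment name and dtype are illustrative, not piker's.

from multiprocessing.shared_memory import SharedMemory

import numpy as np

# illustrative dtype: an integer index plus OHLC float fields
ohlc_dtype = np.dtype([
    ('index', int),
    ('open', float),
    ('high', float),
    ('low', float),
    ('close', float),
])

# ~3 days of 1-second bars, matching the new default size above
size = 3 * 60 * 60 * 24

# owner/parent side: create the segment and zero-fill it
template = np.zeros(size, dtype=ohlc_dtype)
shm = SharedMemory(create=True, size=template.nbytes, name='example.ohlc')
array = np.ndarray(template.shape, dtype=template.dtype, buffer=shm.buf)
array[:] = template[:]

# ...hand ``shm.name`` to other processes, which attach with
# ``SharedMemory(name='example.ohlc')`` and wrap ``.buf`` the same way...

# teardown from the parent-most accessor: drop the exported view,
# close this mapping, then unlink (permanently destroy) the segment
del array
shm.close()
shm.unlink()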
