'timestamp': dates_repeated,
})
if ffilled_values is None:
ffilled_values = baseline.value.iloc[:nassets]
updated_values = baseline.value.iloc[nassets:]
expected_views = keymap(pd.Timestamp, {
'2014-01-03': [ffilled_values],
'2014-01-04': [updated_values],
})
with tmp_asset_finder(equities=simple_asset_info) as finder:
expected_output = pd.DataFrame(
list(concatv(ffilled_values, updated_values)),
index=pd.MultiIndex.from_product((
sorted(expected_views.keys()),
finder.retrieve_all(simple_asset_info.index),
)),
columns=('value',),
)
self._run_pipeline(
bz.data(baseline, name='expr', dshape=self.value_dshape),
None,
bz.data(
checkpoints,
name='expr_checkpoints',
dshape=self.value_dshape,
),
expected_views,
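
Above, concatv simply splices the forward-filled baseline values ahead of the
updated values so the rows line up with the (date, asset) MultiIndex built from
expected_views. The splice in isolation, with hypothetical values in place of
the fixture data:

from toolz import concatv

ffilled = [10.0, 11.0, 12.0]   # one value per asset, carried forward a day
updated = [10.1, 11.1, 12.1]   # the refreshed values for the following day
rows = list(concatv(ffilled, updated))
assert rows == [10.0, 11.0, 12.0, 10.1, 11.1, 12.1]
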
def compute_sorted_frame(df, order_by, group_by=(), **kwargs):
computed_sort_keys = []
sort_keys = list(toolz.concatv(group_by, order_by))
ascending = [getattr(key.op(), 'ascending', True) for key in sort_keys]
new_columns = {}
    for key in map(operator.methodcaller('op'), sort_keys):
computed_sort_key, temporary_column = compute_sort_key(
key, df, **kwargs
)
computed_sort_keys.append(computed_sort_key)
if temporary_column is not None:
new_columns[computed_sort_key] = temporary_column
result = df.assign(**new_columns)
    result = result.sort_values(
        computed_sort_keys, ascending=ascending, kind='mergesort'
    )
    # hand the sorted frame back along with the computed keys, split into
    # the grouping and ordering parts (callers unpack three values)
    ngrouping_keys = len(group_by)
    return (
        result,
        computed_sort_keys[:ngrouping_keys],
        computed_sort_keys[ngrouping_keys:],
    )
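
concatv puts the group_by keys ahead of the order_by keys, so the single stable
mergesort clusters rows by group first and then orders within each group. A
rough pandas-only sketch of the same idea, with hypothetical column names:

import pandas as pd

df = pd.DataFrame({'g': ['b', 'a', 'b', 'a'], 'x': [2, 1, 1, 2]})
# group key first, ordering key second: the order concatv produces above
print(df.sort_values(['g', 'x'], ascending=[True, True], kind='mergesort'))
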
treated as a script and executed. If it does not end in ``.py`` it is
treated as a module to be imported.
    strict : bool
        Should failure to load an extension raise an exception? If False,
        the failure is reported as a warning instead.
environ : mapping
The environment to use to find the default extension path.
reload : bool, optional
Reload any extensions that have already been loaded.
"""
if default:
default_extension_path = pth.default_extension(environ=environ)
pth.ensure_file(default_extension_path)
# put the default extension first so other extensions can depend on
# the order they are loaded
extensions = concatv([default_extension_path], extensions)
for ext in extensions:
if ext in _loaded_extensions and not reload:
continue
try:
            # load all of the catalyst extensions
            if ext.endswith('.py'):
                run_path(ext, run_name='<extension>')
else:
__import__(ext)
except Exception as e:
if strict:
# if `strict` we should raise the actual exception and fail
raise
# without `strict` we should just log the failure
            warnings.warn(
                'Failed to load extension: %r\n%s' % (ext, e),
                stacklevel=2,
            )
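
A minimal sketch of driving this loader, assuming the signature suggested by
the body above; the extension path and module name are hypothetical:

import os

load_extensions(
    default=True,
    extensions=['/tmp/my_extension.py', 'my_package.my_extension'],
    strict=False,
    environ=os.environ,
)
# the first entry ends in .py, so it is executed via run_path; the second
# does not, so it is imported with __import__
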
first_seen_add(p.point_key)
seen_add(p)
self_reflect = []
mirror_reflect = []
reflect = []
self_contract = [point]
contract = []
while True:
next_self_reflect = []
next_mirror_reflect = []
next_reflect = []
next_self_contract = []
next_contract = []
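        # expand the frontier: interleave round-robins the reflections within
        # each bucket, and concatv chains the five buckets into one lazy stream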
for p in concatv(
interleave(x.get_reflections() for x in self_reflect),
interleave(x.get_reflections() for x in mirror_reflect),
interleave(x.get_reflections() for x in reflect),
interleave(x.get_reflections() for x in self_contract),
interleave(x.get_reflections() for x in contract),
):
if p.point_key not in first_seen:
stencil_points_append(p)
yield p
first_seen_add(p.point_key)
seen_add(p)
next_reflect.append(p)
elif p not in seen:
seen_add(p)
if p.index == 0:
next_self_reflect.append(p)
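
The concatv-of-interleave combination above drains several streams-of-streams
in a single pass. The same pattern on toy data instead of stencil points:

from toolz import concatv, interleave

bucket_a = [[1, 2], [3]]
bucket_b = [[4], [5, 6]]
merged = list(concatv(interleave(bucket_a), interleave(bucket_b)))
assert merged == [1, 3, 2, 4, 5, 6]
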
if sort_keys:
result, grouping_keys, ordering_keys = util.compute_sorted_frame(
result, order_by=sort_keys, scope=scope, **kwargs
)
else:
grouping_keys = ordering_keys = ()
# return early if we do not have any temporary grouping or ordering columns
assert not grouping_keys, 'group by should never show up in Selection'
if not ordering_keys:
return result
# create a sequence of columns that we need to drop
temporary_columns = pd.Index(
concatv(grouping_keys, ordering_keys)
).difference(data.columns)
# no reason to call drop if we don't need to
if temporary_columns.empty:
return result
# drop every temporary column we created for ordering or grouping
return result.drop(temporary_columns, axis=1)
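
The cleanup above drops only the helper columns that were not already part of
the data. A compact sketch of that Index arithmetic, with hypothetical names:

import pandas as pd
from toolz import concatv

data_columns = pd.Index(['a', 'b'])
grouping_keys, ordering_keys = (), ('__sort_key_0__',)
temporary = pd.Index(concatv(grouping_keys, ordering_keys)).difference(data_columns)
assert list(temporary) == ['__sort_key_0__']
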
def update_usage_search_locations(self, platform: str):
        '''Update the places where usages are found.

        Call this whenever you load new modules or scripts.
        '''
if platform.lower().startswith('python'):
from . import jedi_dump
jedi_dump.JediCodeElementNode.usage_resolution_modules = (
frozenset((nn.module_context for nn in
tz.concatv(self.module_nodes[platform].values(),
self.script_nodes[platform].values())
if nn.code_element.path)))
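
Reduced to its core, the pattern above chains the values of several registries
and keeps only the entries that pass a guard. With stand-in strings for the
jedi node objects, which are not shown here:

import toolz as tz

module_nodes = {'os': '/usr/lib/python3/os.py', 'builtins': None}
script_nodes = {'main': '/home/user/main.py'}
with_paths = frozenset(
    path
    for path in tz.concatv(module_nodes.values(), script_nodes.values())
    if path  # mirrors the `if nn.code_element.path` guard
)
assert with_paths == {'/usr/lib/python3/os.py', '/home/user/main.py'}
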
return self.copy(lambda s: concatv(s, self.strict, other.source, other.strict), lambda s: List())
lambda v: tuple(
OwnershipPeriod(
a.start,
b.start,
a.sid,
a.value,
) for a, b in sliding_window(
2,
concatv(
sorted(v),
# concat with a fake ownership object to make the last
# end date be max timestamp
[OwnershipPeriod(
pd.Timestamp.max.tz_localize('utc'),
None,
None,
None,
                    )],
                ),
            )
        ),
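
concatv appends a sentinel period so that sliding_window can pair every
ownership record with its successor, turning a sorted list of start dates into
half-open [start, next_start) intervals. The pattern in isolation, with toy
numbers standing in for the OwnershipPeriod rows:

from toolz import concatv, sliding_window

starts = [1, 5, 9]
SENTINEL = float('inf')  # plays the role of pd.Timestamp.max above
intervals = list(sliding_window(2, concatv(starts, [SENTINEL])))
assert intervals == [(1, 5), (5, 9), (9, SENTINEL)]
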