elif data_type == TICKER:
    df = df.drop(['pair', 'feed'], axis=1)
    chunk_size = 'D'
elif data_type in {L2_BOOK, L3_BOOK}:
    chunk_size = 'T'
elif data_type == FUNDING:
    chunk_size = 'D'
elif data_type == OPEN_INTEREST:
    df = df.drop(['pair', 'feed'], axis=1)
    chunk_size = 'D'

df.set_index('date', inplace=True)
# All timestamps are in UTC
df.index = df.index.tz_localize(None)

if exchange not in self.con.list_libraries():
    self.con.initialize_library(exchange, lib_type=StorageEngines.arctic.CHUNK_STORE)

self.con[exchange].append(f"{data_type}-{pair}", df, upsert=True, chunk_size=chunk_size)
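
# A minimal usage sketch (not part of the original source): reading data back
# from the Arctic ChunkStore populated by the append above. The connection
# string, exchange library and symbol names are hypothetical examples.
store = Arctic('localhost')
lib = store.con['COINBASE']          # one Arctic library per exchange
df = lib.read('trades-BTC-USD')      # symbols are named f"{data_type}-{pair}"
print(df.head())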
def aws_write(bucket, key, data, creds=(None, None), endpoint=None):
    # `data` is the path to a local file that is uploaded to s3://{bucket}/{key}
    client = StorageEngines.boto3.client('s3',
                                         aws_access_key_id=creds[0],
                                         aws_secret_access_key=creds[1],
                                         endpoint_url=endpoint
                                         )

    with open(data, 'rb') as fp:
        client.upload_fileobj(fp, bucket, key)
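
# A minimal usage sketch (not part of the original source): the bucket, key and
# local file path below are hypothetical. Leaving creds as (None, None) lets
# boto3 fall back to its standard credential lookup chain.
aws_write('my-cryptostore-bucket',
          'COINBASE/trades-BTC-USD-1577836800.parquet',
          '/tmp/trades-BTC-USD-1577836800.parquet')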
def __init__(self, ip, port, flush=False):
    self.conn = {}
    self.ip = ip
    self.port = port
    self.ids = {}
    if flush:
        kafka = StorageEngines['confluent_kafka.admin']
        ac = kafka.admin.AdminClient({'bootstrap.servers': f"{ip}:{port}"})
        topics = list(ac.list_topics().topics.keys())
        for topic, status in ac.delete_topics(topics).items():
            try:
                status.result()
                LOG.info("Topic %s deleted", topic)
            except Exception as e:
                LOG.warning("Failed to delete topic %s: %s", topic, e)
def _conn(self, key):
    if key not in self.conn:
        self.ids[key] = None
        kafka = StorageEngines.confluent_kafka
        self.conn[key] = kafka.Consumer({'bootstrap.servers': f"{self.ip}:{self.port}",
                                         'client.id': f'cryptostore-{key}',
                                         'enable.auto.commit': False,
                                         'group.id': f'cryptofeed-{key}',
                                         'max.poll.interval.ms': 3000000})
        self.conn[key].subscribe([key])
    return self.conn[key]
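
# A minimal sketch (not from the original source) of draining one topic via
# _conn. `cache` stands in for an instance of this Kafka cache class; the topic
# name and batch size are hypothetical.
consumer = cache._conn('trades-COINBASE-BTC-USD')
for msg in consumer.consume(num_messages=500, timeout=1.0):
    if msg.error() is None:
        print(msg.value())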
def __init__(self, ip=None, port=None, socket=None, del_after_read=True, flush=False, retention=None):
    self.del_after_read = del_after_read
    self.retention = retention
    self.last_id = {}
    self.ids = defaultdict(list)
    if ip and port and socket:
        raise ValueError("Cannot specify ip/port and socket for Redis")
    self.conn = StorageEngines.redis.Redis(ip, port, unix_socket_path=socket, decode_responses=True)
    if flush:
        LOG.info('Flushing cache')
        self.conn.flushall()
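
# A minimal sketch (not from the original source): cryptostore caches updates in
# Redis streams, so entries can be read back with XREAD. `Redis` names the cache
# class this __init__ belongs to, and the stream key below is hypothetical.
cache = Redis(ip='127.0.0.1', port=6379)
for stream, messages in cache.conn.xread({'trades-COINBASE-BTC-USD': '0-0'}, count=1000):
    for msg_id, fields in messages:
        print(msg_id, fields)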
def aws_list(bucket, key, creds=(None, None), limit=None, endpoint=None):
    client = StorageEngines.boto3.client('s3',
                                         aws_access_key_id=creds[0],
                                         aws_secret_access_key=creds[1],
                                         endpoint_url=endpoint
                                         )

    objs = client.list_objects_v2(Bucket=bucket, Prefix=key)
    if objs and 'Contents' in objs:
        ret = []
        if limit:
            for obj in objs['Contents']:
                ret.append(obj['Key'])
                limit -= 1
                if not limit:
                    break
            return ret
        else:
            # no limit given: return every matching key
            return [obj['Key'] for obj in objs['Contents']]
    return None
def _get_bucket(bucket, creds):
    google = StorageEngines['google.cloud.storage']

    if creds:
        client = google.cloud.storage.Client.from_service_account_json(creds)
    else:
        # falls back to the GOOGLE_APPLICATION_CREDENTIALS env var, or to
        # instance credentials when running on GCE
        client = google.cloud.storage.Client()

    return client.get_bucket(bucket)
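
# A minimal usage sketch (not part of the original source): uploading a local
# file to the bucket returned by _get_bucket. The bucket name, object key and
# file path are hypothetical.
bucket = _get_bucket('my-cryptostore-bucket', None)
blob = bucket.blob('COINBASE/trades-BTC-USD-1577836800.parquet')
blob.upload_from_filename('/tmp/trades-BTC-USD-1577836800.parquet')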
import itertools
import logging
import requests
from cryptostore.data.store import Store
LOG = logging.getLogger('cryptostore')
def chunk(iterable, length):
    return (iterable[i:i + length] for i in range(0, len(iterable), length))
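
# Illustrative only: chunk() yields consecutive fixed-size slices of a sequence,
# which is useful for batching documents into Elasticsearch bulk requests.
list(chunk([1, 2, 3, 4, 5], 2))   # -> [[1, 2], [3, 4], [5]]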
class ElasticSearch(Store):
    def __init__(self, config: dict):
        self.data = None
        self.host = config.host
        self.user = config.user
        self.token = config.token
        self.settings = {
            'settings': {
                "index": {
                    "number_of_shards": config.shards,
                    "number_of_replicas": config.replicas,
                    "refresh_interval": config.refresh_interval
                }
            }
        }

    def aggregate(self, data):
        self.data = data
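
    # A minimal sketch, not the original write() implementation: pushing the
    # aggregated rows through the Elasticsearch bulk API in batches. The index
    # name, batch size and token-as-password auth are assumptions.
    def _bulk_write_sketch(self, index: str):
        import json
        for batch in chunk(self.data, 10000):
            body = ''.join(json.dumps({"index": {}}) + '\n' + json.dumps(row) + '\n' for row in batch)
            requests.post(f"{self.host}/{index}/_bulk",
                          auth=(self.user, self.token),
                          data=body,
                          headers={'content-type': 'application/x-ndjson'})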
'''
Copyright (C) 2018-2020 Bryant Moscon - bmoscon@gmail.com
Please see the LICENSE file for the terms and conditions
associated with this software.
'''
import pandas as pd
from cryptofeed.defines import TRADES, L2_BOOK, L3_BOOK, TICKER, FUNDING, OPEN_INTEREST
from cryptostore.data.store import Store
from cryptostore.engines import StorageEngines
class Arctic(Store):
    def __init__(self, connection: str):
        self.data = []
        self.con = StorageEngines.arctic.Arctic(connection)

    def aggregate(self, data):
        self.data = data

    def write(self, exchange, data_type, pair, timestamp):
        chunk_size = None
        if not self.data:
            return
        df = pd.DataFrame(self.data)
        self.data = []
        df['date'] = pd.to_datetime(df['timestamp'], unit='s')
        df['receipt_timestamp'] = pd.to_datetime(df['receipt_timestamp'], unit='s')