root_json_dict=None,
schema_parser=None,
root_list_path=None,
root_id="ocid",
use_titles=False,
xml=False,
id_name="id",
filter_field=None,
filter_value=None,
preserve_fields=None,
remove_empty_schema_columns=False,
rollup=False,
truncation_length=3,
):
self.sub_sheets = {}
self.main_sheet = Sheet()
self.root_list_path = root_list_path
self.root_id = root_id
self.use_titles = use_titles
self.truncation_length = truncation_length
self.id_name = id_name
self.xml = xml
self.filter_field = filter_field
self.filter_value = filter_value
self.remove_empty_schema_columns = remove_empty_schema_columns
self.seen_paths = set()
if schema_parser:
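# Start from the sheets the schema parser has already worked out; deep-copy
# them so that columns added while parsing the JSON do not leak back into the
# schema parser's own Sheet objects.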
self.main_sheet = copy.deepcopy(schema_parser.main_sheet)
self.sub_sheets = copy.deepcopy(schema_parser.sub_sheets)
if remove_empty_schema_columns:
    # Don't use columns from the schema parser
    self.main_sheet.columns = []
    for sheet in self.sub_sheets.values():
        sheet.columns = []
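
# Illustrative usage sketch (added here, not part of the original excerpt). It
# assumes these snippets are the JSONParser/SchemaParser pair of a
# flattentool-style library; the import paths, the parse() method and the
# sample schema/data below are assumptions made for the example only.
from flattentool.json_input import JSONParser
from flattentool.schema import SchemaParser

schema_parser = SchemaParser(
    root_schema_dict={"properties": {"id": {"type": "string"}}},
    root_id="ocid",
)
schema_parser.parse()
parser = JSONParser(
    root_json_dict={"releases": [{"ocid": "ocds-001", "id": "1"}]},
    schema_parser=schema_parser,
    root_list_path="releases",
    root_id="ocid",
)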
yield property_name, title
else:
raise ValueError
elif "object" in type_set:
if title:
title_lookup[title].property_name = property_name
sub_sheet_name = make_sub_sheet_name(
parent_path,
property_name,
truncation_length=self.truncation_length,
)
# self.sub_sheet_mapping[parent_name+'/'+property_name] = sub_sheet_name
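# Create the sub-sheet the first time this path is seen: its name is derived
# from the parent path and property name (truncated per truncation_length to
# keep sheet names short) and it carries the root id column so child rows can
# be related back to the top-level row.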
if sub_sheet_name not in self.sub_sheets:
self.sub_sheets[sub_sheet_name] = Sheet(
root_id=self.root_id, name=sub_sheet_name
)
sub_sheet = self.sub_sheets[sub_sheet_name]
sub_sheet.title_lookup = title_lookup.get(title)
for field in id_fields:
sub_sheet.add_field(field, id_field=True)
sub_sheet.titles[title_lookup.lookup_header(field)] = field
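# Recurse into the item schema: the "/0" segment records the list position in
# the flattened path, and the accumulated colon-separated title prefix keeps
# generated spreadsheet headers unambiguous when titles are used.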
fields = self.parse_schema_dict(
parent_path + property_name + "/0",
property_schema_dict["items"],
parent_id_fields=id_fields,
title_lookup=title_lookup.get(title),
parent_title=parent_title + title + ":"
if parent_title is not None and title
else None,
)

def __init__(self, xml_schemas=[], root_list_path=None):
self.sub_sheets = {}
self.main_sheet = Sheet()
self.sub_sheet_mapping = {}
self.xml_schemas = xml_schemas
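# The root list path is required by this parser, so fail fast here rather
# than discovering the problem part-way through parsing.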
assert root_list_path is not None
self.root_list_path = root_list_path
def __init__(
self,
schema_filename=None,
root_schema_dict=None,
rollup=False,
root_id=None,
use_titles=False,
disable_local_refs=False,
truncation_length=3,
exclude_deprecated_fields=False,
):
self.sub_sheets = {}
self.main_sheet = Sheet()
self.sub_sheet_mapping = {}
self.do_rollup = rollup
self.rollup = set()
self.root_id = root_id
self.use_titles = use_titles
self.truncation_length = truncation_length
self.title_lookup = TitleLookup()
self.flattened = {}
self.exclude_deprecated_fields = exclude_deprecated_fields
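# The schema may be supplied either as a filename to load or as an already
# parsed dict; exactly one of the two must be given.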
if root_schema_dict is None and schema_filename is None:
raise ValueError(
"One of schema_filename or root_schema_dict must be supplied"
)
if root_schema_dict is not None and schema_filename is not None:
raise ValueError(
    "Only one of schema_filename or root_schema_dict should be supplied"
)

] = "WARNING: More than one value supplied, consult the relevant sub-sheet for the data."
elif parent_name + key in self.rollup:
warn(
'More than one value supplied for "{}". Could not provide rollup, so adding a warning to the relevant cell(s) in the spreadsheet.'.format(
parent_name + key
)
)
flattened_dict[
sheet_key(sheet, parent_name + key + "/0/" + k)
] = "WARNING: More than one value supplied, consult the relevant sub-sheet for the data."
sub_sheet_name = make_sub_sheet_name(
parent_name, key, truncation_length=self.truncation_length
)
if sub_sheet_name not in self.sub_sheets:
self.sub_sheets[sub_sheet_name] = Sheet(name=sub_sheet_name)
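# Each element of the list becomes a row in that sub-sheet; None entries are
# skipped, and the "/0/" segment added to parent_name records the list
# position in the flattened key.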
for json_dict in value:
if json_dict is None:
continue
self.parse_json_dict(
json_dict,
sheet=self.sub_sheets[sub_sheet_name],
json_key=key,
parent_id_fields=parent_id_fields,
parent_name=parent_name + key + "/0/",
top_level_of_sub_sheet=True,
)
else:
raise ValueError("Unsupported type {}".format(type(value)))
if top:
    sheet.lines.append(flattened_dict)
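
# Continuing the illustrative sketch from above: once parsing has run, rows
# accumulate on main_sheet.lines and on each sub-sheet's lines. parse() and
# the lines attribute are assumptions about the surrounding library, so treat
# this as a sketch rather than a guaranteed API.
parser.parse()
print(list(parser.main_sheet.lines))
for name, sub_sheet in parser.sub_sheets.items():
    print(name, list(sub_sheet.lines))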