Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def to_representation(self, instance):
    """Serialize the channel, optionally appending calculated fields.

    If the request in the serializer context carries an ``include_fields``
    GET parameter (a comma-separated list), each recognized field name
    triggers an extra computed entry in the serialized output.
    """
    value = super(ChannelMetadataSerializer, self).to_representation(instance)
    # if the request includes a GET param 'include_fields', add the requested calculated fields
    if "request" in self.context:
        # NOTE(review): ``"".split(",")`` yields ``[""]``, so this list is
        # always truthy; the ``in include_fields`` checks below are what
        # actually gate each computed field.
        include_fields = (
            self.context["request"].GET.get("include_fields", "").split(",")
        )
        if include_fields:
            # build querysets for the full set of channel nodes, as well as those that are unrenderable
            channel_nodes = ContentNode.objects.filter(channel_id=instance.id)
            unrenderable_nodes = channel_nodes.exclude(
                renderable_contentnodes_without_topics_q_filter
            )
            if "total_resources" in include_fields:
                # count the total number of renderable non-topic resources in the channel
                # (note: it's faster to count them all and then subtract the unrenderables, of which there are fewer)
                value["total_resources"] = (
                    channel_nodes.dedupe_by_content_id().count()
                    - unrenderable_nodes.dedupe_by_content_id().count()
                )
            if "total_file_size" in include_fields:
                # count the total file size of files associated with renderable content nodes
                # (note: it's faster to count them all and then subtract the unrenderables, of which there are fewer)
                # NOTE(review): the snippet is truncated here — the call's
                # arguments and the remainder of the method are missing
                # from this chunk of SOURCE.
                value["total_file_size"] = total_file_size(
def to_representation(self, instance):
    """Serialize the channel, optionally appending calculated fields.

    NOTE(review): this snippet is a byte-for-byte duplicate of the
    preceding ``to_representation`` fragment in this file — the file
    appears to be a concatenation of scraped code excerpts.
    """
    value = super(ChannelMetadataSerializer, self).to_representation(instance)
    # if the request includes a GET param 'include_fields', add the requested calculated fields
    if "request" in self.context:
        # NOTE(review): ``"".split(",")`` yields ``[""]``, so this list is
        # always truthy; the membership checks below do the real gating.
        include_fields = (
            self.context["request"].GET.get("include_fields", "").split(",")
        )
        if include_fields:
            # build querysets for the full set of channel nodes, as well as those that are unrenderable
            channel_nodes = ContentNode.objects.filter(channel_id=instance.id)
            unrenderable_nodes = channel_nodes.exclude(
                renderable_contentnodes_without_topics_q_filter
            )
            if "total_resources" in include_fields:
                # count the total number of renderable non-topic resources in the channel
                # (note: it's faster to count them all and then subtract the unrenderables, of which there are fewer)
                value["total_resources"] = (
                    channel_nodes.dedupe_by_content_id().count()
                    - unrenderable_nodes.dedupe_by_content_id().count()
                )
            if "total_file_size" in include_fields:
                # count the total file size of files associated with renderable content nodes
                # (note: it's faster to count them all and then subtract the unrenderables, of which there are fewer)
                # NOTE(review): truncated here — the call's arguments and the
                # rest of the method are missing from this chunk of SOURCE.
                value["total_file_size"] = total_file_size(
# NOTE(review): fragment — the enclosing function's signature is not visible
# in this chunk; ``coaches``, ``facility``, ``channels`` and ``num_lessons``
# are presumably its parameters/locals. Looks like test/demo data generation
# for Lessons — confirm against the full file.
# Pick an existing coach, or fall back to a facility member, or lazily
# create a default "coach" account when the facility has no users at all.
if coaches:
    coach = random.choice(coaches)
else:
    members = facility.get_members()
    if not members:
        coach = FacilityUser.objects.create(username="coach", facility=facility)
        coach.set_password("password")
        coach.save()
    else:
        coach = random.choice(members)
facility.add_coach(coach)
for count in range(num_lessons):
    channel = random.choice(channels)
    channel_content = ContentNode.objects.filter(channel_id=channel.id)
    # don't add more than 10 resources per Lesson:
    n_content_items = min(random.randint(0, channel_content.count() - 1), 10)
    lesson_content = []
    for i in range(0, n_content_items):
        # Use this to randomly select a content node to generate the interaction for
        random_node = random.choice(channel_content)
        # minimal resource descriptor stored on the Lesson
        content = {
            "contentnode_id": random_node.id,
            "channel_id": channel.id,
            "content_id": random_node.content_id,
        }
        lesson_content.append(content)
    lesson = Lesson.objects.create(
        title="Lesson {}-{a}".format(count, a=random.choice("ABCDEF")),
        resources=lesson_content,
        # NOTE(review): truncated here — the remaining
        # ``Lesson.objects.create()`` arguments and the rest of the loop
        # body are missing from this chunk of SOURCE.
# NOTE(review): fragment — the enclosing function's signature is not visible
# in this chunk; ``coaches``, ``facility`` and ``num_exams`` come from
# outside. Coach selection below duplicates the lesson-generation fragment
# earlier in this file.
# Pick an existing coach, or fall back to a facility member, or lazily
# create a default "coach" account when the facility has no users at all.
if coaches:
    coach = random.choice(coaches)
else:
    members = facility.get_members()
    if not members:
        coach = FacilityUser.objects.create(username="coach", facility=facility)
        coach.set_password("password")
        coach.save()
    else:
        coach = random.choice(members)
facility.add_coach(coach)
for count in range(num_exams):
    # exam questions can come from different channels
    # (only exercises that actually have assessment items are usable)
    exercise_content = ContentNode.objects.filter(
        kind=content_kinds.EXERCISE
    ).filter(~Q(assessmentmetadata__assessment_item_ids=[]))
    # don't add more than 3 resources per:
    n_content_items = min(exercise_content.count(), 3)
    exam_content = []
    content_ids = []
    assessment_ids = []
    for i in range(0, n_content_items):
        # Use this to randomly select an exercise content node to generate the interaction for
        random_node = random.choice(exercise_content)
        # grab this exercise node's assessment ids
        assessment_item_ids = (
            random_node.assessmentmetadata.first().assessment_item_ids
        )
        # randomly select one of the questions in the exercise and store the ids for the exam attempt logs
        assessment_ids.append(random.choice(assessment_item_ids))
        # NOTE(review): truncated here — the rest of the loop body and the
        # exam creation are missing from this chunk of SOURCE.
def get_queryset(self):
    """Restrict this view's queryset to content nodes marked available."""
    available_nodes = models.ContentNode.objects.filter(available=True)
    return available_nodes
# NOTE(review): fragment of a channel-import/transfer routine — ``job``,
# ``method``, ``channel_id``, ``baseurl``, ``node_ids``,
# ``exclude_node_ids`` and ``total_bytes_to_transfer`` are defined outside
# this chunk of SOURCE.
job.extra_metadata["total_resources"] = total_resource_count
job.save_meta()
number_of_skipped_files = 0
transferred_file_size = 0
file_checksums_to_annotate = []
public = None
# If we're downloading, check listing status
if method == DOWNLOAD_METHOD:
    public = lookup_channel_listing_status(
        channel_id=channel_id, baseurl=baseurl
    )
# count of distinct, available, non-topic resources taken before the
# transfer — presumably compared against a post-transfer count later in
# the (unseen) remainder of the routine; confirm against the full file.
resources_before_transfer = (
    ContentNode.objects.filter(channel_id=channel_id, available=True)
    .exclude(kind=content_kinds.TOPIC)
    .values("content_id")
    .distinct()
    .count()
)
# extra "dummy" bytes added to the progress total, presumably so the
# annotation phase after the transfer can also advance the same progress
# bar — TODO confirm in annotation.calculate_dummy_progress_for_annotation.
dummy_bytes_for_annotation = annotation.calculate_dummy_progress_for_annotation(
    node_ids, exclude_node_ids, total_bytes_to_transfer
)
with self.start_progress(
    total=total_bytes_to_transfer + dummy_bytes_for_annotation
) as overall_progress_update:
    exception = None  # Exception that is not caught by the retry logic
    if method == DOWNLOAD_METHOD:
        # NOTE(review): truncated here — the download branch body and the
        # rest of the ``with`` block are missing from this chunk of SOURCE.
def cache_content_title(content_id):
    """Return the title of a ContentNode for ``content_id``, with caching.

    Results (including "" for an unknown content_id) are kept in the cache
    for ten minutes to avoid repeated database lookups.
    """
    cache_key = '{id}_ContentNode_title'.format(id=content_id)
    cached_title = cache.get(cache_key)
    if cached_title is not None:
        return cached_title
    matching_node = ContentNode.objects.filter(content_id=content_id).first()
    resolved_title = matching_node.title if matching_node else ""
    cache.set(cache_key, resolved_title, 60 * 10)
    return resolved_title