How to use the rfc3339.rfc3339 function in rfc3339

To help you get started, we’ve selected a few rfc3339 examples, based on popular ways it is used in public projects.

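Before looking at the project examples, here is a minimal sketch of the basic call. It assumes the package's documented signature, rfc3339(date, utc=False, use_system_timezone=True); the sample outputs are illustrative, since the offset for naive datetimes depends on the system timezone.

import datetime
from rfc3339 import rfc3339

d = datetime.datetime(2008, 4, 2, 20, 0, 0)

# By default a naive datetime is interpreted in the system timezone,
# so the UTC offset in the output varies from machine to machine.
print(rfc3339(d))  # e.g. '2008-04-02T20:00:00-04:00'

# Treating the naive value as UTC gives a deterministic result.
print(rfc3339(d, utc=True, use_system_timezone=False))  # '2008-04-02T20:00:00Z'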

github LibraryOfCongress / chronam / apps / chronam-core / chronam / core / rdf.py
def batch_to_graph(b):
    g = make_graph()
    uri = abstract_uri(b)

    g.add((uri, RDF.type, NDNP['Batch']))
    g.add((uri, DCTERMS['created'], Literal(rfc3339(b.created), 
                                            datatype=XSD.dateTime)))
    g.add((uri, DCTERMS['title'], Literal(b.name)))
    g.add((uri, DCTERMS['creator'], abstract_uri(b.awardee)))
    g.add((uri, NDNP['bag'], URIRef('/data/' + b.bag_relative_path)))
    for issue in b.issues.all():
        g.add((uri, ORE['aggregates'], abstract_uri(issue)))
    add_rem(g, uri, rdf_uri(b))

    return g
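
The detail worth copying from this example is the pairing of rfc3339() output with an explicit xsd:dateTime datatype, so the literal is typed rather than a bare string. A stripped-down sketch of just that pairing, with plain rdflib imports and a made-up URI standing in for the project's make_graph and abstract_uri helpers:

import datetime
from rfc3339 import rfc3339
from rdflib import Graph, Literal, URIRef
from rdflib.namespace import DCTERMS, XSD

g = Graph()
uri = URIRef('http://example.org/batch/1')  # hypothetical resource URI

# rfc3339() yields a string in the xsd:dateTime lexical space.
created = rfc3339(datetime.datetime(2010, 5, 1, 12, 0, 0), utc=True, use_system_timezone=False)
g.add((uri, DCTERMS.created, Literal(created, datatype=XSD.dateTime)))

print(g.serialize(format='turtle'))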
github LibraryOfCongress / chronam / core / management / commands / update_sitemap.py
        # if we've maxed out the number of urls per sitemap
        # close out the one we have open and open a new one
        if url_count % max_urls == 0:
            page_count += 1
            if sitemap_file:
                sitemap.write('</urlset>\n')
                sitemap.close()
            sitemap_file = 'sitemap-%05d.xml' % page_count
            sitemap_path = 'static/sitemaps/%s' % sitemap_file
            _logger.info("writing %s" % sitemap_path)
            sitemap = open(sitemap_path, 'w')
            sitemap.write('<?xml version="1.0" encoding="UTF-8"?>\n<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n')
            sitemap_index.write('<sitemap><loc>http://chroniclingamerica.loc.gov/%s</loc></sitemap>\n' % sitemap_file)

        # add a url to the sitemap
        sitemap.write("http://chroniclingamerica.loc.gov%s%s\n" % (loc, rfc3339(last_mod)))
        url_count += 1

        # necessary to avoid memory bloat when settings.DEBUG = True
        if url_count % 1000 == 0:
            reset_queries()

    try:
        # wrap up some open files. do this only if we had release candidates 
        # if not, accessing sitemap variable will cause an error
        sitemap.write('</urlset>\n')
        sitemap.close()
    except NameError:
        _logger.info("No release candidates this time.")
        pass

    sitemap_index.write('</sitemapindex>\n')
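
Each <lastmod> element here carries an RFC 3339 timestamp, which sitemap consumers accept as a W3C datetime value. A self-contained sketch of a single <url> entry (the path and timestamp are made up, and utc=True/use_system_timezone=False are added only to make the output deterministic):

import datetime
from rfc3339 import rfc3339

loc = '/lccn/sn83030214/'  # hypothetical URL path
last_mod = datetime.datetime(2011, 6, 15, 8, 30, 0)

entry = "<url><loc>http://chroniclingamerica.loc.gov%s</loc><lastmod>%s</lastmod></url>\n" % (
    loc, rfc3339(last_mod, utc=True, use_system_timezone=False))
print(entry)  # ...<lastmod>2011-06-15T08:30:00Z</lastmod>...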
github open-oni / open-oni / core / views / reports.py
def batches_atom(request, page_number=1):
    batches = models.Batch.viewable_batches()
    batches = batches.order_by('-created')
    now = rfc3339(timezone.now())

    paginator = Paginator(batches, 25)
    page = paginator.page(page_number)
    return render(request, 'reports/batches.xml', locals(),
                  content_type='application/atom+xml')
github lillchan / google-calendar-app / app.py
def datetime_combine_rfc3339(date, time):
    combined = datetime.datetime.combine(date, time)
    rfc3339_datetime = rfc3339(combined)
    return rfc3339_datetime
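
A possible call site for this helper, combining a calendar date with a wall-clock time (the values are illustrative, and the offset in the result depends on the system timezone):

import datetime

event_date = datetime.date(2020, 3, 14)
event_time = datetime.time(9, 30)
print(datetime_combine_rfc3339(event_date, event_time))
# e.g. '2020-03-14T09:30:00-07:00' on a machine set to US Pacific time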
github open-oni / open-oni / core / views / search.py
    # and the previous page number
    if page.has_previous():
        query['page'] = paginator._cur_page - 1
        previous_url = '?' + query.urlencode()

    rows = query.get("rows", "20")
    sort = query.get("sort", default="relevance")
    seq_check = "checked" if query.get("sequence", "0") == "1" else ""

    crumbs = list(settings.BASE_CRUMBS)

    host = request.get_host()
    format = request.GET.get('format', None)
    if format == 'atom':
        feed_url = settings.BASE_URL + request.get_full_path()
        updated = rfc3339(timezone.now())
        return render(request, 'search/search_pages_results.xml', locals(),
                      content_type='application/atom+xml')
    elif format == 'json':
        results = {
            'startIndex': start,
            'endIndex': end,
            'totalItems': paginator.count,
            'itemsPerPage': rows,
            'items': [p.solr_doc for p in page.object_list],
        }
        for i in results['items']:
            i['url'] = settings.BASE_URL + i['id'].rstrip('/') + '.json'
        json_text = json.dumps(results, indent=2)
        # jsonp?
        if request.GET.get('callback') is not None:
            json_text = "%s(%s);" % (request.GET.get('callback'), json_text)
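
The callback branch wraps the JSON body so it can be consumed as JSONP. A tiny standalone illustration of the resulting payload (the callback name is hypothetical):

import json

json_text = json.dumps({'totalItems': 1}, indent=2)
callback = 'handleResults'  # stands in for request.GET.get('callback')
print("%s(%s);" % (callback, json_text))  # handleResults({ ... });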
github LibraryOfCongress / chronam / apps / chronam-web / chronam / web / views.py
    page_range_short = list(_page_range_short(paginator, page))

    query = request.GET.copy()
    if page.has_next():
        query['page'] = curr_page + 1
        next_url = '?' + query.urlencode()
    if page.has_previous():
        query['page'] = curr_page - 1
        previous_url = '?' + query.urlencode()

    host = request.get_host()
    format = request.GET.get('format', None)
    if format == 'atom':
        feed_url = 'http://' + host + request.get_full_path()
        updated = rfc3339(datetime.datetime.now())
        return render_to_response('search_titles_results.xml',
                                  dictionary=locals(),
                                  context_instance=RequestContext(request),
                                  mimetype='application/atom+xml')

    elif format == 'json':
        results = [t.solr_doc for t in page.object_list]
        return HttpResponse(json.dumps(results), mimetype='application/json')

    sort = request.GET.get('sort', 'relevance')

    q = request.GET.copy()
    if 'page' in q:
        del q['page']
    if 'sort' in q:
        del q['sort']
github open-oni / open-oni / core / rdf.py
def add_rem(g, uri_a, uri_r):
    """
    adds assertions about the aggregate resource (uri_a) and the
    the resource map (uri_r) that describes it using the oai-ore vocabulary
    http://www.openarchives.org/ore/1.0/datamodel.html
    """
    g.add((uri_a, ORE['isDescribedBy'], uri_r))
    g.add((uri_r, RDF.type, ORE['ResourceMap']))
    g.add((uri_r, ORE['describes'], uri_a))
    g.add((uri_r, DCTERMS['creator'], URIRef('http://chroniclingamerica.loc.gov/awardees/dlc#awardee')))

    # TODO: would be nice if created and modified were more real somehow
    # so oai-ore bots would know when resources needed to be harvested...
    t = rfc3339(timezone.now())
    g.add((uri_r, DCTERMS['created'], Literal(t, datatype=XSD.dateTime)))
    g.add((uri_r, DCTERMS['modified'], Literal(t, datatype=XSD.dateTime)))

    return g
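
Both dcterms:created and dcterms:modified are stamped with the same rfc3339(timezone.now()) string, so a freshly built resource map always reads as modified at creation time. A minimal sketch of that call, with an aware datetime standing in for Django's timezone.now():

import datetime
from rfc3339 import rfc3339

# An aware UTC datetime, as Django's timezone.now() returns when USE_TZ is on.
now = datetime.datetime.now(datetime.timezone.utc)
t = rfc3339(now)  # offset comes from the datetime itself; exact rendering ('Z' vs '+00:00') is up to the library
print(t)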
github dsebastien / youtubeChannelVideosFinder / youtubeChannelVideosFinder.py
	goBackTo = startFrom - timeInterval
	
	done = False
	
	while not done:
		if(goBackTo < dateToGoBackTo):
			log.debug('The interval is now larger than the remaining time span to retrieve videos for. Using the date to go back to as next boundary')
			goBackTo = dateToGoBackTo
		
		if(goBackTo == dateToGoBackTo):
			log.debug('Last round-trip')
			done = True
		
		log.debug('Converting timestamps to RFC3339 format')
		goBackTo_rfc3339 = rfc3339(goBackTo,utc=True)
		startFrom_rfc3339 = rfc3339(startFrom,utc=True)
		
		videosPublishedInInterval = getChannelVideosPublishedInInterval(channelId,startFrom_rfc3339,goBackTo_rfc3339)
		
		log.debug('Adding videos found in the interval to the results list')
		retVal.extend(videosPublishedInInterval)
		log.debug('Total video(s) found so far: %d',len(retVal))
		
		if(not done):
			# we simply continue from where we are
			startFrom = goBackTo
			
			# calculate the next date to go back to based on the given interval
			nextDate = goBackTo - timeInterval
			log.debug('Calculating the next date to go back to based on the interval: %s - %s => %s',goBackTo,timeInterval,nextDate)
			goBackTo = nextDate
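
A self-contained sketch of the timestamp conversion this loop performs at each round-trip (the interval and dates are illustrative):

import datetime
from rfc3339 import rfc3339

startFrom = datetime.datetime(2015, 1, 31)
timeInterval = datetime.timedelta(days=7)
goBackTo = startFrom - timeInterval

# Same conversion as in the loop above: both window boundaries rendered in UTC.
# Note that naive datetimes are first interpreted in the system timezone,
# so the wall time may shift unless the machine runs on UTC.
print(rfc3339(startFrom, utc=True))  # e.g. '2015-01-31T00:00:00Z' on a UTC machine
print(rfc3339(goBackTo, utc=True))   # e.g. '2015-01-24T00:00:00Z'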
github chrisgervang / reflectrum / legacy / g_cal.py
def start_time():
    """
    Returns the RFC3339 time stamp for the start of today.
    """
    d = datetime.datetime.today()
    start = datetime.datetime(d.year, d.month, d.day, 0, 0, 0, 0)
    return rfc3339(start)
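
For comparison, the same midnight-of-today value can be built with datetime.combine; a sketch of an equivalent helper:

import datetime
from rfc3339 import rfc3339

def start_of_today():
    # Equivalent to the helper above: today's date at 00:00:00.
    start = datetime.datetime.combine(datetime.date.today(), datetime.time.min)
    return rfc3339(start)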
github aehlke / django-catnap / catnap / serializers.py
        ret = None

        if isinstance(thing, QuerySet):
            # Actually it's the same as a list ...
            ret = _list(thing)
        elif isinstance(thing, ErrorList) and hasattr(thing, 'get_json_data'):
            ret = thing.get_json_data(escape_html=False)
        elif isinstance(thing, (tuple, list, set)):
            ret = _list(thing)
        elif isinstance(thing, dict):
            ret = _dict(thing)
        elif isinstance(thing, decimal.Decimal):
            ret = str(thing)
        elif isinstance(thing,
                (datetime.datetime, datetime.date, datetime.time)):
            ret = rfc3339(thing, use_system_timezone=False)
        elif isinstance(thing, Model):
            # e.g. a single element from a queryset
            ret = _model(thing)
        #elif isinstance(thing, HttpResponse):
        #    raise HttpStatusCode(thing)
        # here we need to encode the string as unicode (otherwise we get utf-16 in the json-response)
        elif isinstance(thing, basestring):
            ret = unicode(thing)
        # see http://code.djangoproject.com/ticket/5868
        elif isinstance(thing, Promise):
            ret = force_unicode(thing)
        elif inspect.isfunction(thing):
            if not inspect.getargspec(thing)[0]:
                ret = _any(thing())
        elif hasattr(thing, '__emittable__'):
            f = thing.__emittable__

rfc3339

Format dates according to RFC 3339. ISC license; latest version published 5 years ago.
