How to use rfc3339 - 10 common examples

To help you get started, we've selected ten rfc3339 examples, drawn from popular ways the library is used in public projects.
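The examples cover both directions of conversion: formatting Python datetime objects as RFC 3339 strings, and turning RFC 3339 strings back into datetimes or timestamps. As a minimal sketch of the formatting direction, assuming the PyPI rfc3339 package, whose module and function share a name:

import datetime
from rfc3339 import rfc3339

# rfc3339() accepts a datetime (dates and Unix timestamps also work)
# and returns a string such as '2024-05-01T12:30:00+02:00'
print(rfc3339(datetime.datetime.now()))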

github atareao / calendar-indicator / src / googlecalendarapi.py View on Github
def _get_date(self, typeofdate):
    daybefore = datetime.datetime.now(LocalTZ())
    if 'recurrence' in self.keys():
        rset = dateutil.rrule.rruleset()
        for el in self['recurrence']:
            if el.find('DTSTART') == -1:
                if 'date' in self[typeofdate].keys():
                    # all-day event: extend the bare date to a full RFC 3339 stamp
                    dtstart = self[typeofdate]['date'] + 'T00:00:00' + get_utc_offset(daybefore)
                elif 'dateTime' in self[typeofdate].keys():
                    dtstart = self[typeofdate]['dateTime']
                # parse the RFC 3339 string into a datetime
                dtstart = rfc3339.parse_datetime(dtstart)
                if el.find('UNTIL') != -1:
                    elements = el.split(';')
                    ans = ''
                    for element in elements:
                        if element.startswith('UNTIL='):
                            s, e = element.split('=')
                            if len(e) == 8:
                                # bare date such as 20240501: append midnight plus offset
                                e += 'T000000' + get_utc_offset(daybefore).replace(':', '')
                            elif len(e) == 17:
                                # date-time without an offset: append one
                                e += get_utc_offset(daybefore)
                            element = s + '=' + e
                        ans += element + ';'
                    if ans.endswith(';'):
                        ans = ans[:-1]
                    el = ans
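The call to note here is rfc3339.parse_datetime(), which turns the assembled RFC 3339 string into a timezone-aware datetime; that helper comes from the module this project ships. A rough equivalent using dateutil, which the snippet already imports for rrule:

from dateutil import parser

# isoparse() handles RFC 3339 strings, including the UTC offset
dt = parser.isoparse('2024-05-01T00:00:00+02:00')
print(dt)         # 2024-05-01 00:00:00+02:00
print(dt.tzinfo)  # tzoffset(None, 7200)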
github brvier / KhtNotes / khtnotes / importer.py View on Github
        if os.path.exists(path + '.txt'):
            index = 2
            # find a free "name N.txt" filename
            while (os.path.exists(os.path.join(
                    note.NOTESPATH, '%s %s.txt'
                    % (path, unicode(index))))):
                index = index + 1
            uuid = ('%s %s'
                    % (os.path.basename(path), unicode(index)))

        note.uuid = uuid + '.txt'
        note.write(handler._content)
        try:
            from rfc3339.rfc3339 import strtotimestamp
            # convert the note's RFC 3339 change time to a Unix timestamp
            mtime = strtotimestamp(handler._last_change)
            lpath = os.path.join(Note.NOTESPATH, note.uuid)
            os.utime(lpath, (-1, mtime))
        except Exception:
            import traceback
            print traceback.format_exc()  # Python 2 codebase

    except Exception:
        import traceback
        print traceback.format_exc()

self._set_running(False)
self.on_finished.emit()
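The pattern worth keeping from this example is stamping a parsed RFC 3339 change time onto the note file's mtime. A stdlib-only sketch of the same idea (strtotimestamp is this project's vendored helper; datetime.fromisoformat below needs Python 3.11+ to accept a trailing 'Z'):

import datetime
import os

def set_mtime_from_rfc3339(path, stamp):
    # parse the RFC 3339 string and convert it to a Unix timestamp
    mtime = datetime.datetime.fromisoformat(stamp).timestamp()
    os.utime(path, (mtime, mtime))  # sets (atime, mtime)

set_mtime_from_rfc3339('note.txt', '2024-05-01T09:30:00+00:00')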
github ConorWilliams / rsinc / rsinc / rclone.py View on Github
    result = subprocess.Popen(
        command + track.rclone_flags, stdout=subprocess.PIPE
    )
    list_of_dicts = ujson.load(result.stdout)

    command = ["rclone", "hashsum", hash_name, path]
    result = subprocess.Popen(command, stdout=subprocess.PIPE)
    hashes = {}

    for file in result.stdout:
        # each `rclone hashsum` line is "<hash>  <path>"
        decode = file.decode(RCLONE_ENCODING).strip()
        tmp = decode.split("  ", 1)
        hashes[tmp[1]] = tmp[0]

    out = Flat(path)
    for d in list_of_dicts:
        # RFC 3339 ModTime string -> Unix timestamp
        time = strtotimestamp(d["ModTime"])
        hashsize = str(d["Size"])

        hash = hashes.get(d["Path"], None)
        if hash is not None:
            hashsize += hash
        else:
            print(red("ERROR:"), "can't find", d["Path"], "hash")
            continue

        out.update(d["Path"], hashsize, time)

    return out
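For context, the truncated first statement runs rclone with JSON output; rclone's lsjson subcommand emits each entry's ModTime as an RFC 3339 string, which is what strtotimestamp() converts above. A self-contained sketch, assuming rclone is installed and "remote:path" is one of your configured remotes:

import json
import subprocess

result = subprocess.run(
    ["rclone", "lsjson", "remote:path"],  # hypothetical remote
    capture_output=True, text=True, check=True,
)
for entry in json.loads(result.stdout):
    # ModTime looks like '2020-01-15T10:30:00Z'
    print(entry["Path"], entry["ModTime"])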
github LibraryOfCongress / chronam / apps / chronam-core / chronam / core / rdf.py View on Github
def batch_to_graph(b):
    g = make_graph()
    uri = abstract_uri(b)

    g.add((uri, RDF.type, NDNP['Batch']))
    g.add((uri, DCTERMS['created'], Literal(rfc3339(b.created), 
                                            datatype=XSD.dateTime)))
    g.add((uri, DCTERMS['title'], Literal(b.name)))
    g.add((uri, DCTERMS['creator'], abstract_uri(b.awardee)))
    g.add((uri, NDNP['bag'], URIRef('/data/' + b.bag_relative_path)))
    for issue in b.issues.all():
        g.add((uri, ORE['aggregates'], abstract_uri(issue)))
    add_rem(g, uri, rdf_uri(b))

    return g
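The idiom to note: rfc3339() output is a valid lexical form for xsd:dateTime, so it can go straight into a typed RDF literal. A minimal rdflib sketch of the same pattern, with a made-up subject URI:

import datetime
from rdflib import Graph, Literal, URIRef
from rdflib.namespace import DCTERMS, XSD
from rfc3339 import rfc3339

g = Graph()
batch = URIRef('http://example.org/batches/batch_1')  # hypothetical
g.add((batch, DCTERMS.created,
       Literal(rfc3339(datetime.datetime.now()), datatype=XSD.dateTime)))
print(g.serialize(format='turtle'))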
github LibraryOfCongress / chronam / core / management / commands / update_sitemap.py View on Github
        # if we've maxed out the number of urls per sitemap,
        # close out the one we have open and open a new one
        if url_count % max_urls == 0:
            page_count += 1
            if sitemap_file:
                sitemap.write('</urlset>\n')
                sitemap.close()
            sitemap_file = 'sitemap-%05d.xml' % page_count
            sitemap_path = 'static/sitemaps/%s' % sitemap_file
            _logger.info("writing %s" % sitemap_path)
            sitemap = open(sitemap_path, 'w')
            sitemap.write('<?xml version="1.0" encoding="UTF-8"?>\n'
                          '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n')
            sitemap_index.write('<sitemap><loc>http://chroniclingamerica.loc.gov/%s</loc></sitemap>\n'
                                % sitemap_file)

        # add a url to the sitemap, with an RFC 3339 <lastmod>
        sitemap.write("<url><loc>http://chroniclingamerica.loc.gov%s</loc><lastmod>%s</lastmod></url>\n"
                      % (loc, rfc3339(last_mod)))
        url_count += 1
        url_count += 1

        # necessary to avoid memory bloat when settings.DEBUG = True
        if url_count % 1000 == 0:
            reset_queries()

    try:
        # wrap up the open sitemap file. Do this only if we had release
        # candidates; if not, accessing the sitemap variable raises NameError.
        sitemap.write('</urlset>\n')
        sitemap.close()
    except NameError:
        _logger.info("No release candidates this time.")

    sitemap_index.write('</sitemapindex>\n')
github open-oni / open-oni / core / views / reports.py View on Github external
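The sitemap protocol's <lastmod> element takes W3C-datetime values, which rfc3339() output satisfies. A stripped-down version of the loop above, with a hypothetical URL and filename:

import datetime
from rfc3339 import rfc3339

with open('sitemap-00001.xml', 'w') as sitemap:
    sitemap.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    sitemap.write('<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n')
    sitemap.write('<url><loc>%s</loc><lastmod>%s</lastmod></url>\n'
                  % ('http://example.org/page/1', rfc3339(datetime.datetime.now())))
    sitemap.write('</urlset>\n')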
def batches_atom(request, page_number=1):
    batches = models.Batch.viewable_batches()
    batches = batches.order_by('-created')
    now = rfc3339(timezone.now())  # Atom <updated> values must be RFC 3339

    paginator = Paginator(batches, 25)
    page = paginator.page(page_number)
    return render(request, 'reports/batches.xml', locals(),
                  content_type='application/atom+xml')
github lillchan / google-calendar-app / app.py View on Github
def datetime_combine_rfc3339(date, time):
    combined = datetime.datetime.combine(date, time)
    rfc3339_datetime = rfc3339(combined)
    return rfc3339_datetime
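Usage of the helper above, assuming the same from rfc3339 import rfc3339 as the other examples (the exact offset in the output depends on how rfc3339() resolves the system timezone):

import datetime

d = datetime.date(2024, 5, 1)
t = datetime.time(9, 30)
print(datetime_combine_rfc3339(d, t))  # e.g. '2024-05-01T09:30:00+02:00'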
github open-oni / open-oni / core / views / search.py View on Github
    # and the previous page number
    if page.has_previous():
        query['page'] = paginator._cur_page - 1
        previous_url = '?' + query.urlencode()

    rows = query.get("rows", "20")
    sort = query.get("sort", default="relevance")
    seq_check = "checked" if query.get("sequence", "0") == "1" else ""

    crumbs = list(settings.BASE_CRUMBS)

    host = request.get_host()
    format = request.GET.get('format', None)
    if format == 'atom':
        feed_url = settings.BASE_URL + request.get_full_path()
        updated = rfc3339(timezone.now())
        return render(request, 'search/search_pages_results.xml', locals(),
                      content_type='application/atom+xml')
    elif format == 'json':
        results = {
            'startIndex': start,
            'endIndex': end,
            'totalItems': paginator.count,
            'itemsPerPage': rows,
            'items': [p.solr_doc for p in page.object_list],
        }
        for i in results['items']:
            i['url'] = settings.BASE_URL + i['id'].rstrip('/') + '.json'
        json_text = json.dumps(results, indent=2)
        # jsonp?
        if request.GET.get('callback') is not None:
            json_text = "%s(%s);" % (request.GET.get('callback'), json_text)
github LibraryOfCongress / chronam / apps / chronam-web / chronam / web / views.py View on Github
    page_range_short = list(_page_range_short(paginator, page))

    query = request.GET.copy()
    if page.has_next():
        query['page'] = curr_page + 1
        next_url = '?' + query.urlencode()
    if page.has_previous():
        query['page'] = curr_page - 1
        previous_url = '?' + query.urlencode()

    host = request.get_host()
    format = request.GET.get('format', None)
    if format == 'atom':
        feed_url = 'http://' + host + request.get_full_path()
        updated = rfc3339(datetime.datetime.now())
        return render_to_response('search_titles_results.xml',
                                  dictionary=locals(),
                                  context_instance=RequestContext(request),
                                  mimetype='application/atom+xml')

    elif format == 'json':
        results = [t.solr_doc for t in page.object_list]
        return HttpResponse(json.dumps(results), mimetype='application/json')

    sort = request.GET.get('sort', 'relevance')

    q = request.GET.copy()
    if 'page' in q:
        del q['page']
    if 'sort' in q:
        del q['sort']
github open-oni / open-oni / core / rdf.py View on Github external
def add_rem(g, uri_a, uri_r):
    """
    adds assertions about the aggregate resource (uri_a) and the
    resource map (uri_r) that describes it using the OAI-ORE vocabulary:
    http://www.openarchives.org/ore/1.0/datamodel.html
    """
    g.add((uri_a, ORE['isDescribedBy'], uri_r))
    g.add((uri_r, RDF.type, ORE['ResourceMap']))
    g.add((uri_r, ORE['describes'], uri_a))
    g.add((uri_r, DCTERMS['creator'], URIRef('http://chroniclingamerica.loc.gov/awardees/dlc#awardee')))

    # TODO: would be nice if created and modified were more real somehow
    # so oai-ore bots would know when resources needed to be harvested...
    t = rfc3339(timezone.now())
    g.add((uri_r, DCTERMS['created'], Literal(t, datatype=XSD.dateTime)))
    g.add((uri_r, DCTERMS['modified'], Literal(t, datatype=XSD.dateTime)))

    return g

rfc3339

Format dates according to RFC 3339.

License: ISC
Latest version published 5 years ago
