def absolute_uri(url=None):
    if not url:
        return options.get("system.url-prefix")
    return urljoin(options.get("system.url-prefix").rstrip("/") + "/", url.lstrip("/"))
def get_from_email_domain():
    global _from_email_domain_cache
    from_ = options.get("mail.from")
    if _from_email_domain_cache[0] != from_:
        _from_email_domain_cache = (from_, domain_from_email(from_))
    return _from_email_domain_cache[1]
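For context, a minimal sketch of the module-level pieces this helper relies on; the initialization and domain_from_email shown here are simplified assumptions rather than the actual implementations:

# Assumed module-level cache: (last seen "mail.from" value, its parsed domain).
# get_from_email_domain() only recomputes the domain when "mail.from" changes.
_from_email_domain_cache = (None, None)

def domain_from_email(email):
    # Simplified assumption: take everything after the first "@".
    return email.split("@", 1)[-1]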
def build_auth(request, saml_config):
    """
    Construct a OneLogin_Saml2_Auth object for the current request.
    """
    url = urlparse(options.get("system.url-prefix"))
    saml_request = {
        "https": "on" if url.scheme == "https" else "off",
        "http_host": url.hostname,
        "script_name": request.META["PATH_INFO"],
        "server_port": url.port,
        "get_data": request.GET.copy(),
        "post_data": request.POST.copy(),
    }
    return OneLogin_Saml2_Auth(saml_request, saml_config)
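A hedged usage sketch for build_auth in a Django view handling a SAML response; handle_saml_response is a hypothetical name, and saml_config would come from the organization's configured auth provider:

def handle_saml_response(request, saml_config):
    # Hypothetical view helper: validate the SAML response and return the
    # asserted attributes using the python3-saml auth object built above.
    auth = build_auth(request, saml_config)
    auth.process_response()
    if auth.get_errors() or not auth.is_authenticated():
        raise ValueError("SAML authentication failed: %s" % auth.get_errors())
    return auth.get_attributes()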
too_many_candidates = True
candidate_ids = []
sort_field = sort_strategies[sort_by]
chunk_growth = options.get("snuba.search.chunk-growth-rate")
max_chunk_size = options.get("snuba.search.max-chunk-size")
chunk_limit = limit
offset = 0
num_chunks = 0
hits = None
paginator_results = EMPTY_RESULT
result_groups = []
result_group_ids = set()
max_time = options.get("snuba.search.max-total-chunk-time-seconds")
time_start = time.time()
if count_hits and (too_many_candidates or cursor is not None):
    # If we had too many candidates to reasonably pass down to snuba,
    # or if we have a cursor that bisects the overall result set (such
    # that our query only sees results on one side of the cursor) then
    # we need an alternative way to figure out the total hits that this
    # query has.
    # To do this, we get a sample of groups matching the snuba side of
    # the query, and see how many of those pass the post-filter in
    # postgres. This should give us an estimate of the total number of
    # snuba matches that will be overall matches, which we can use to
    # get an estimate for X-Hits.
    # The sampling is not simple random sampling. It will return *all*
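The comment above describes estimating X-Hits from a sample; a minimal sketch of that idea follows, using a hypothetical helper name and simplified arithmetic (the actual implementation, including its non-uniform sampling, lives in the surrounding search backend):

def estimate_hits(snuba_match_count, sampled_group_ids, group_queryset):
    # Hypothetical illustration: scale the total number of Snuba-side matches
    # by the fraction of a sampled subset that also passes the postgres
    # post-filter, giving an approximate value for X-Hits.
    if not sampled_group_ids:
        return 0
    passing = group_queryset.filter(id__in=sampled_group_ids).count()
    return int(snuba_match_count * passing / len(sampled_group_ids))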
def get_mail_backend():
    backend = options.get("mail.backend")
    try:
        return settings.SENTRY_EMAIL_BACKEND_ALIASES[backend]
    except KeyError:
        return backend
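For illustration, the alias map consulted above might resemble the following; this is only an assumed example using standard Django backend paths, and the real mapping is defined in Sentry's settings:

# Assumed example mapping of short backend names to full Django backend paths.
SENTRY_EMAIL_BACKEND_ALIASES = {
    "smtp": "django.core.mail.backends.smtp.EmailBackend",
    "console": "django.core.mail.backends.console.EmailBackend",
    "dummy": "django.core.mail.backends.dummy.EmailBackend",
}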
def security_contact():
    return options.get("system.security-email") or options.get("system.admin-email")
def get_secret(self, organization):
    return options.get("github.integration-hook-secret")
def get_secret(self):
    return options.get('github-app.webhook-secret')
    # Both `start` and `end` must have been trimmed to `retention_date`,
    # so this entire search was against a time range that is outside of
    # retention. We'll return empty results to maintain backwards compatibility
    # with Django search (for now).
    return EMPTY_RESULT

if start >= end:
    # TODO: This maintains backwards compatibility with Django search, but
    # in the future we should find a way to notify the user that their search
    # is invalid.
    return EMPTY_RESULT

# Here we check if all the django filters reduce the set of groups down
# to something that we can send down to Snuba in a `group_id IN (...)`
# clause.
max_candidates = options.get("snuba.search.max-pre-snuba-candidates")
too_many_candidates = False
candidate_ids = list(group_queryset.values_list("id", flat=True)[: max_candidates + 1])
metrics.timing("snuba.search.num_candidates", len(candidate_ids))
if not candidate_ids:
    # no matches could possibly be found from this point on
    metrics.incr("snuba.search.no_candidates", skip_internal=False)
    return EMPTY_RESULT
elif len(candidate_ids) > max_candidates:
    # If the pre-filter query didn't include anything to significantly
    # filter down the number of results (from 'first_release', 'query',
    # 'status', 'bookmarked_by', 'assigned_to', 'unassigned',
    # 'subscribed_by', 'active_at_from', or 'active_at_to') then it
    # might have surpassed the `max_candidates`. In this case,
    # we *don't* want to pass candidates down to Snuba, and instead we
    # want Snuba to do all the filtering/sorting it can and *then* apply
    # this queryset to the results from Snuba, which we call post-filtering.
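A minimal sketch of the post-filtering step that comment describes, with a hypothetical helper name: Snuba filters and sorts first, and its result ids are then intersected with the postgres queryset while keeping Snuba's ordering.

def post_filter_group_ids(snuba_group_ids, group_queryset):
    # Hypothetical illustration: keep only the Snuba results that also satisfy
    # the postgres-side queryset, preserving the order Snuba returned them in.
    matching = set(
        group_queryset.filter(id__in=snuba_group_ids).values_list("id", flat=True)
    )
    return [gid for gid in snuba_group_ids if gid in matching]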
# Generate a new installation ID, log it, and persist it via options.
install_id = sha1(uuid4().hex).hexdigest()
logger.info('Generated installation ID: %s', install_id)
options.set('sentry:install-id', install_id)
end = timezone.now()
events_24h = tsdb.get_sums(
    model=tsdb.models.internal,
    keys=['events.total'],
    start=end - timedelta(hours=24),
    end=end,
)['events.total']

payload = {
    'install_id': install_id,
    'version': sentry.get_version(),
    'admin_email': options.get('system.admin-email'),
    'data': {
        # TODO(dcramer): we'd also like to get an idea about the throughput
        # of the system (i.e. events in 24h)
        'users': User.objects.count(),
        'projects': Project.objects.count(),
        'teams': Team.objects.count(),
        'organizations': Organization.objects.count(),
        'events.24h': events_24h,
    },
    'packages': get_all_package_versions(),
}

# TODO(dcramer): relay the response 'notices' as admin broadcasts
try:
    request = safe_urlopen(BEACON_URL, json=payload, timeout=5)
    response = safe_urlread(request)