context.font_family = 'monospace'
with Image(width=self.MAX_WIDTH,
           height=self.MAX_HEIGHT,
           background=self.BACKGROUND_COLOR) as img:
    vertical_offset = top_margin

    # Event title.
    font = Font(path=self.FONT, size=50, color=self.FONT_COLOR)
    img.caption('SPb Python\nMeetup', left=left_margin, top=vertical_offset,
                width=self.MAX_WIDTH,
                height=self.MAX_HEIGHT,
                gravity=None, font=font)
    vertical_offset += 150

    # Talk title.
    font = Font(path=self.FONT_BOLD, size=21, color=self.FONT_COLOR)
    img.caption(self.talk, left=left_margin, top=vertical_offset,
                width=self.MAX_WIDTH,
                height=self.MAX_HEIGHT,
                gravity=None, font=font)
    vertical_offset += 50

    # Speaker name.
    font = Font(path=self.FONT_BOLD_ITALIC, size=17, color=self.FONT_COLOR)
    img.caption(self.speaker, left=left_margin, top=vertical_offset,
                width=self.MAX_WIDTH,
                height=self.MAX_HEIGHT,
                gravity=None, font=font)

    # Overlay the logo at one third of its original size.
    with Image(filename='static/logo.png') as logo_img:
        context.composite(operator='atop', left=300, top=top_margin - 20,
                          width=logo_img.width / 3, height=logo_img.height / 3,
                          image=logo_img)
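The snippet above depends on class attributes (self.MAX_WIDTH, self.FONT, ...) and a Drawing named context that are defined elsewhere. A minimal, self-contained sketch of the same Wand pattern (Image + Font + caption + composite), with made-up sizes and a placeholder font path, might look like this:

from wand.color import Color
from wand.font import Font
from wand.image import Image

# Hypothetical stand-ins for self.MAX_WIDTH, self.FONT, etc.
WIDTH, HEIGHT = 800, 400
FONT_PATH = 'fonts/DejaVuSansMono.ttf'  # any TTF/OTF available locally

with Image(width=WIDTH, height=HEIGHT, background=Color('white')) as img:
    title_font = Font(path=FONT_PATH, size=50, color=Color('black'))
    # caption() wraps the text inside the given box.
    img.caption('SPb Python\nMeetup', left=40, top=40,
                width=WIDTH - 80, height=200, font=title_font)

    with Image(filename='static/logo.png') as logo:
        # Scale the logo to a third of its size, then paste it onto the cover.
        logo.resize(logo.width // 3, logo.height // 3)
        img.composite(logo, left=20, top=20)

    img.save(filename='cover.png')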
if ext in ('jpg', 'jpeg'):
    fmt = 'jpg'
elif ext == 'png':
    fmt = 'png'
else:
    return "I don't know how to handle format .{} files".format(ext), 404

if message:
    text = message
else:
    text = '{}x{}'.format(width, height)

# Scale the font so the label fits both the width and the height of the box.
min_font_ratio = width / (len(text) * 12.0)
size = max(16 * (height / 100), 16 * min_font_ratio)
font = Font(path='fonts/Inconsolata-dz-Powerline.otf', size=size)

# JPEG has no alpha channel, so only it gets an explicit background colour.
c = Color(bg) if fmt == "jpg" else None
with Image(width=width, height=height, background=c) as image:
    image.caption(text, left=0, top=0,
                  font=font,
                  gravity="center")
    buff = image_to_buffer(image, fmt=ext, compress=False)
    buff.seek(0)
    return buff.read(), 200, {"Content-Type": content_type, "Cache-Control": CACHE_CONTROL}
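Here image_to_buffer, content_type and CACHE_CONTROL come from the surrounding module and are not shown. A rough, self-contained sketch of the same idea that returns raw PNG bytes via Wand's make_blob() instead of the project's image_to_buffer helper (the sizing heuristic is simplified):

from wand.color import Color
from wand.font import Font
from wand.image import Image

def placeholder_png(width, height, message=None, bg='#cccccc'):
    """Render a '<w>x<h>' (or custom) label on a solid background and
    return the encoded PNG bytes."""
    text = message or '{}x{}'.format(width, height)
    # Crude font sizing so long strings still fit inside the box.
    size = max(16, min(width // max(len(text), 1), height // 2))
    font = Font(path='fonts/Inconsolata-dz-Powerline.otf', size=size)
    with Image(width=width, height=height, background=Color(bg)) as image:
        image.caption(text, left=0, top=0, width=width, height=height,
                      font=font, gravity='center')
        return image.make_blob('png')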
if modifierControl.get('ModifierKey') == modifier and modifierControl.get('Key') is not None:
    bind['Modifiers'].append(modifierControl.get('Key'))

outputs[control['Group']][control['Name']] = bind

# Set up a screen state to handle output
screenState = {}
screenState['baseX'] = 60
screenState['baseY'] = 320
screenState['maxWidth'] = 0
screenState['thisWidth'] = 0
screenState['currentX'] = screenState['baseX']
screenState['currentY'] = screenState['baseY']

font = Font(getFontPath('Regular', 'Normal'), antialias=True, size=biggestFontSize)
groupTitleFont = Font(getFontPath('Regular', 'Normal'), antialias=True, size=biggestFontSize * 2)

context.stroke_width = 2
context.stroke_color = Color('Black')
context.fill_opacity = 0

# Go through once for each display group
for displayGroup in displayGroups:
    if outputs[displayGroup] == {}:
        continue
    writeText(context, sourceImg, displayGroup, screenState, groupTitleFont, False, True)
    orderedOutputs = OrderedDict(sorted(outputs[displayGroup].items(),
                                        key=lambda x: x[1].get('Control').get('Order')))
    for bindKey, bind in orderedOutputs.items():
        for modifier in bind.get('Modifiers', []):
            writeText(context, sourceImg, transKey(modifier), screenState, font, True, False)
        writeText(context, sourceImg, transKey(bind.get('Key')), screenState, font, True, False)

return
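writeText, transKey and getFontPath are helpers from the surrounding project and are not shown here. As a rough illustration of what a writeText-style helper can do with Wand's Drawing API (the cursor bookkeeping is a simplified assumption, not the project's actual layout logic):

from wand.drawing import Drawing

def write_text(img, text, state, font_path, font_size):
    """Draw one line of text at the current cursor position and advance it.
    `state` mirrors the screenState dict above (currentX/currentY/baseX)."""
    with Drawing() as draw:
        draw.font = font_path
        draw.font_size = font_size
        metrics = draw.get_font_metrics(img, text)
        draw.text(int(state['currentX']), int(state['currentY']), text)
        draw(img)
    # Move the cursor below the line that was just drawn.
    state['currentY'] += int(metrics.text_height) + 4
    return metrics.text_width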
check = await self.isimage(url)
if not check:
    await ctx.send("Invalid or Non-Image!")
    return

xx = await ctx.message.channel.send("ok, processing")
b = await self.bytes_download(url)
img = wand.image.Image(file=b)
i = img.clone()
font_path = str(bundled_data_path(self)) + '/impact.ttf'

# Build the font from the optional size/color arguments.
if size is not None:
    color = wand.color.Color('{0}'.format(color))
    font = wand.font.Font(path=font_path, size=int(size), color=color)
elif color is not None:
    color = wand.color.Color('{0}'.format(color))
    font = wand.font.Font(path=font_path, size=40, color=color)
else:
    color = wand.color.Color('red')
    font = wand.font.Font(path=font_path, size=40, color=color)

if x is None:
    x = None
    y = int(i.height / 10)

# Halve or quarter large offsets so the caption stays inside the image.
if x is not None and x > 250:
    x = int(x / 2)
if y is not None and y > 250:
    y = int(y / 2)
if x is not None and x > 500:
    x = int(x / 4)
if y is not None and y > 500:
    y = int(y / 4)

if x is not None:
    i.caption(str(text), left=x, top=y, font=font, gravity='center')
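bytes_download, bundled_data_path and the Discord ctx object belong to the bot framework around this command. A stripped-down sketch of just the Wand captioning step (the defaults and font path are placeholders):

import wand.color
import wand.font
import wand.image

def caption_image(data: bytes, text, size=40, color='red', font_path='impact.ttf'):
    """Caption raw image bytes and return the re-encoded result."""
    font = wand.font.Font(path=font_path, size=int(size),
                          color=wand.color.Color(str(color)))
    with wand.image.Image(blob=data) as img:
        x = int(img.width / 10)
        y = int(img.height / 10)
        img.caption(str(text), left=x, top=y, font=font, gravity='center')
        return img.make_blob(img.format or 'png')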
def compose_adaptive_prey(img_paths=None, match_json=None, gap=5,
                          horizontal_gap=5, description=None, caption="Catcierge"):
    img = Image(width=600, height=1124, background=Color("#8A968E"))

    #print("Font path: %s" % args.fonts)
    font = Font(path="%s/source-code-pro/SourceCodePro-Medium.otf" % args.fonts, size=64)
    font_title = Font(path="%s/alex-brush/AlexBrush-Regular.ttf" % args.fonts, size=64)
    font_math = Font(path="%s/Asana-Math/Asana-Math.otf" % args.fonts, size=64)

    imgs = []

    assert (img_paths and (len(img_paths) > 0)) or match_json, \
        "Missing either a list of input image paths or a match json"

    if not img_paths or len(img_paths) == 0:
        step_count = match_json["step_count"]
        for step in match_json["steps"][:step_count]:
            print("Step: %s" % step["name"])
            img_paths.append(step["path"])

    # TODO: Allow any matcher type and number of images...
    assert len(img_paths) == 1 or len(img_paths) == 11, \
        "Invalid number of images %d, expected 2 or 11" % len(img_paths)

    # ... (loading of the step images into imgs is omitted in this excerpt) ...
    dilated = imgs[7]    # Dilated image.
    combined = imgs[8]   # Combined image (re-inverted).
    contours = imgs[9]   # Contours of white areas.
    final = imgs[10]     # Final image.

    # TODO: Enable creating these based on input instead.
    kernel3x3 = create_kernel(w=3, h=3)
    kernel2x2 = create_kernel(w=2, h=2)
    kernel5x1 = create_kernel(w=5, h=1)

    x_start = 20

    img.caption(caption, left=(img.width - 250) / 2, top=5, width=250, height=100, font=font_title)

    if description:
        desc_font = Font(path="%s/source-code-pro/SourceCodePro-Medium.otf" % args.fonts, size=24)
        text_width = desc_font.size * int(len(description) * 0.7)
        img.caption(description, left=(img.width - text_width) / 2, top=80,
                    width=text_width, height=100, font=desc_font)

    height = 120

    # Original.
    img.composite(orgimg, left=mpos(orgimg.width), top=height)
    height += orgimg.height + gap

    # Detected head + cropped region of interest.
    head_row = create_row([detected, croproi], [0, 0], horizontal_gap,
                          caption="Detected head Cropped ROI")
    img.composite(head_row, left=mpos(head_row.width), top=height)
    height += head_row.height + gap

    # TODO: simplify the code below by making the symbols into images
    # before they're used to create the rows.
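create_row, mpos, orgimg and the individual step images are defined in parts of the catcierge script that this excerpt omits. Purely as an illustration (not the project's actual create_row), a helper with a similar job, placing images side by side under a caption, could be sketched like this:

from wand.color import Color
from wand.font import Font
from wand.image import Image

def simple_row(images, gap=5, caption=None, font=None, caption_height=40):
    """Hypothetical stand-in for create_row: lay `images` out side by side,
    optionally with a caption strip above them. The caller owns (and should
    close) the returned Image."""
    width = sum(im.width for im in images) + gap * (len(images) - 1)
    height = max(im.height for im in images) + (caption_height if caption else 0)
    row = Image(width=width, height=height, background=Color('transparent'))
    if caption and font:
        row.caption(caption, left=0, top=0, width=width,
                    height=caption_height, font=font, gravity='center')
    x = 0
    for im in images:
        row.composite(im, left=x, top=caption_height if caption else 0)
        x += im.width + gap
    return row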