def test_combine_retain_on_no_retain(self):
    text_model_a = sherlock_model_no_retain
    text_model_b = sherlock_model
    combo = markovify.combine([text_model_a, text_model_b])
    assert combo.retain_original
    assert combo.parsed_sentences == text_model_b.parsed_sentences

def test_combine_lists(self):
    _list = list(sherlock_model.chain.model.items())
    combo = markovify.combine([_list, _list])

def test_mismatched_state_sizes(self):
    with self.assertRaises(Exception) as context:
        text_model_a = markovify.Text(sherlock, state_size=2)
        text_model_b = markovify.Text(sherlock, state_size=3)
        combo = markovify.combine([text_model_a, text_model_b])

def test_mismatched_model_types(self):
    with self.assertRaises(Exception) as context:
        text_model_a = sherlock_model
        text_model_b = markovify.NewlineText(sherlock)
        combo = markovify.combine([text_model_a, text_model_b])

def test_simple(self):
    text_model = sherlock_model
    combo = markovify.combine([text_model, text_model], [0.5, 0.5])
    assert combo.chain.model == text_model.chain.model

def test_compiled_model_fail(self):
    with self.assertRaises(Exception) as context:
        model_a = sherlock_model
        model_b = sherlock_model_compiled
        combo = markovify.combine([model_a, model_b])
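The tests above come from markovify's own suite and rely on fixtures (sherlock_model, sherlock_model_compiled, and so on). A minimal standalone sketch of the same combine call, with a placeholder corpus path and weights chosen only for illustration:

import markovify

# Build two models from the same corpus; any models with a matching
# state_size can be combined (the file path here is a placeholder).
with open("sherlock.txt") as f:
    corpus = f.read()

model_a = markovify.Text(corpus, state_size=2)
model_b = markovify.Text(corpus, state_size=2)

# The weights list is optional; omitting it weights every model equally.
combo = markovify.combine([model_a, model_b], [1.5, 1])

# make_sentence() may return None if no sentence could be generated.
print(combo.make_sentence())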
# Normalize the quote data... Get rid of IRC junk
clean_quotes = [normalize_quote(d['quote']) for d in quotes]
# Normalize the inspire data... Just lightly prune authors
clean_inspirations = [normalize_inspiration(d['text']) for d in inspirations]
# Normalize the rant data... just remove ending punctuation
clean_rants = [normalize_rant(d['text']) for d in rants]
# Create the three models, and combine them.
# More heavily weight our quotes and rants
rants_model = markovify.NewlineText('\n'.join(clean_rants))
quotes_model = markovify.NewlineText('\n'.join(clean_quotes))
inspire_model = markovify.NewlineText('\n'.join(clean_inspirations))
return markovify.combine([quotes_model, rants_model, inspire_model], model_weights)
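model_weights is not defined in this excerpt; per the comment above, it presumably gives the quote and rant models more weight than the inspiration model. A purely hypothetical value consistent with that comment:

# Hypothetical weights: quotes and rants count double relative to inspirations.
model_weights = [2, 2, 1]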
import sys

import markovify

# Load two pre-trained models from their JSON serializations.
with open('ico.json') as f:
    text = f.read()
ico_model = markovify.Text.from_json(text)

with open('erowid.json') as f:
    text = f.read()
erowid_model = markovify.Text.from_json(text)

# Combine the models, weighting the ICO corpus slightly more heavily.
combo = markovify.combine([ico_model, erowid_model], [1.5, 1])

# Print as many short sentences as requested on the command line.
for i in range(int(sys.argv[1])):
    print(combo.make_short_sentence(200))
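The JSON files above are assumed to hold models serialized beforehand with to_json(); a sketch of producing one (the corpus filename is a placeholder):

import markovify

# Train a model once, then persist it so the generator script can reload it
# quickly with markovify.Text.from_json().
with open('ico_corpus.txt') as f:
    ico_model = markovify.Text(f.read())

with open('ico.json', 'w') as f:
    f.write(ico_model.to_json())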
# Excerpt from an async Discord bot command: a loop (not shown) walks the
# mentioned users and stops at the first one without a stored chain.
        empty_chain = target
        break
    chain_dicts.append(deserialize(target_chain.get("chain")))

failed = False
for chain_dict in chain_dicts:
    try:
        # Rebuild each user's markovify model off the event loop.
        chain_task = functools.partial(markovify.Text.from_dict, chain_dict)
        chain = await cmd.bot.loop.run_in_executor(threads, chain_task)
        chains.append(chain)
    except (ValueError, KeyError, AttributeError):
        failed = True

if not empty_chain:
    if not failed:
        await cmd.bot.cool_down.set_cooldown(cmd.name, pld.msg.author, 20)
        try:
            # Combining the chains and generating a sentence are CPU-bound,
            # so they also run in the thread pool executor.
            combine_task = functools.partial(markovify.combine, chains)
            combination = await cmd.bot.loop.run_in_executor(threads, combine_task)
            sentence_function = functools.partial(combination.make_short_sentence, 500)
            sentence = await cmd.bot.loop.run_in_executor(threads, sentence_function)
        except (ValueError, KeyError, AttributeError):
            sentence = None
        if not sentence:
            not_enough_data = '😖 I could not think of anything... I need more chain items!'
            response = discord.Embed(color=0xBE1931, title=not_enough_data)
        else:
            combined_name = combine_names(pld.msg.mentions)
            response = discord.Embed(color=0xbdddf4)
            response.set_author(name=combined_name, icon_url=user_avatar(secrets.choice(pld.msg.mentions)))
            response.add_field(name='💭 Hmm... something like...', value=sentence)
    else:
        response = error('Failed to combine the markov chains.')
else:
    # (excerpt truncated: this branch handles the user flagged in empty_chain)
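Outside the bot framework, the same pattern of pushing markovify work onto a thread pool looks roughly like this (the helper name and corpus inputs are illustrative, not from the snippet above):

import asyncio
import functools
from concurrent.futures import ThreadPoolExecutor

import markovify


async def combined_sentence(texts):
    # Illustrative helper: build a model per text, combine them, and generate
    # one short sentence, keeping the CPU-bound work off the event loop.
    loop = asyncio.get_running_loop()
    with ThreadPoolExecutor() as pool:
        models = [await loop.run_in_executor(pool, markovify.Text, text) for text in texts]
        combo = await loop.run_in_executor(pool, functools.partial(markovify.combine, models))
        return await loop.run_in_executor(pool, functools.partial(combo.make_short_sentence, 500))

# Example entry point: asyncio.run(combined_sentence([corpus_a, corpus_b]))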
from typing import List, Union

import markovify

# input_type is not defined in the original excerpt; this definition simply
# mirrors the two branches handled inside the function below.
input_type = Union[str, List[str]]


def markov(input: input_type, ngram_size=1, num_output_sentences=5) -> List[str]:
    """Markov chain text generation via the markovify library; supports a custom n-gram length.

    Keyword arguments:
    ngram_size: order of the n-gram model (passed to markovify as state_size)
    num_output_sentences: number of sentences to output
    """
    if type(input) == list:
        list_of_texts = input
    elif type(input) == str:
        list_of_texts = [input]
    markov_models = []
    for text in list_of_texts:
        markov_models.append(markovify.Text(text, state_size=ngram_size))
    textgen = markovify.combine(markov_models)
    output_sentences = []
    while len(output_sentences) < num_output_sentences:
        # make_sentence() returns None when it cannot build a sentence,
        # so only genuine strings are collected.
        sentence = textgen.make_sentence()
        if isinstance(sentence, str):
            output_sentences.append(sentence)
    return output_sentences
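A usage sketch, where the placeholder strings stand in for full corpora:

# Either a single string or a list of strings is accepted.
for sentence in markov(["First placeholder corpus...", "Second placeholder corpus..."],
                       ngram_size=2, num_output_sentences=3):
    print(sentence)

Note that the while loop in markov() only exits once enough sentences have been generated, so the inputs need to be sizeable corpora; on a tiny text make_sentence() can keep returning None.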