How to use vowpalwabbit - 10 common examples

To help you get started, we’ve selected a few vowpalwabbit examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

Example from github.com/VowpalWabbit/vowpal_wabbit — python/examples/test_search.py (view on GitHub):
(VERB, 'ate'),
                (DET , 'a'),
                (ADJ , 'big'),
                (NOUN, 'sandwich')],
               [(DET , 'the'),
                (NOUN, 'sandwich'),
                (VERB, 'was'),
                (ADJ , 'tasty')],
               [(NOUN, 'it'),
                (VERB, 'ate'),
                (NOUN, 'it'),
                (ADJ , 'all')] ]


# Build the learner with the 'hook' search task so our Python class drives search.
vw = pyvw.vw("--search 4 --quiet --search_task hook --ring_size 1024")

# Hand VW the class; it constructs the search-task object for us.
sequenceLabeler = vw.init_search_task(SequenceLabeler)

# Ten passes over the training data (iterating my_dataset feeds each sentence to _run).
print('training!', file=sys.stderr)
for _ in range(10):
    sequenceLabeler.learn(my_dataset)

# Run the trained model on a held-out sentence.
print('predicting!', file=sys.stderr)
test_sentence = [(1, w) for w in "the sandwich ate a monster".split()]
print(sequenceLabeler.predict(test_sentence))
print('should have printed: [1, 2, 3, 1, 2]')
Example from github.com/VowpalWabbit/vowpal_wabbit — python/examples/test_search_ldf.py (view on GitHub):
ex.set_label_string(str(p) + ':0')
        return ex

    def _run(self, sentence):
        # Invoked by VW's search driver -- the leading underscore reminds
        # you not to call it directly.
        predictions = []
        for idx, (pos, word) in enumerate(sentence):
            # One LDF candidate example per possible tag.
            candidates = [self.makeExample(word, tag) for tag in [DET, NOUN, VERB, ADJ]]
            tag = self.sch.predict(examples=candidates, my_tag=idx + 1, oracle=pos, condition=(idx, 'p'))
            # NOTE(review): relies on the module-level `vw` to finish the examples.
            vw.finish_example(candidates)
            predictions.append(tag)
        return predictions

# initialize VW as usual, but use 'hook' as the search_task
vw = pyvw.vw("--search 0 --csoaa_ldf m --quiet --search_task hook --ring_size 1024")

# tell VW to construct your search task object
sequenceLabeler = vw.init_search_task(SequenceLabeler)

# train it on the above dataset ten times; the my_dataset.__iter__ feeds into _run above
# (idiomatic for-loop replaces the manual while-counter; status messages go to
# stderr so they don't mix with the predictions printed to stdout, consistent
# with the 'predicting!' line below)
print('training!', file=sys.stderr)
for _ in range(10):
    sequenceLabeler.learn(my_dataset)

# now see the predictions on a test sentence
print('predicting!', file=sys.stderr)
print(sequenceLabeler.predict( [(1,w) for w in "the sandwich ate a monster".split()] ))
print('should have printed: [1, 2, 3, 1, 2]')
Example from github.com/Kaggle/docker-python — test_build.py (view on GitHub):
for ds in client.list_datasets(): pass
except:
    pass
# Shut down the fake BigQuery proxy server, then verify it saw the request.
httpd.shutdown()
# Fix: "recieve" -> "receive" in the assertion message.
assert fake_bq_called, "Fake server did not receive a request from the BQ client."
assert fake_bq_header_found, "X-KAGGLE-PROXY-DATA header was missing from the BQ request."
print("bigquery proxy ok")

# Smoke-test that each packaged library imports (and minimally runs).
import shap
print("shap ok")

import kmapper
print("kmapper ok")

from vowpalwabbit import pyvw
vw = pyvw.vw(quiet=True)
ex = vw.example('1 | a b c')
vw.learn(ex)
print(vw.predict(ex))
print('vowpalwabbit ok')

import essentia
print(essentia.__version__)
print("Essentia ok")
Example from github.com/hal3/macarico — tests/test_sequence_labeler.py (view on GitHub):
parser = argparse.ArgumentParser()
    parser.add_argument('--method', type=str, choices=['reslope', 'prep', 'mc', 'bootstrap'],
                        default='prep')
    parser.add_argument('--env', type=str, choices=[
        'gridworld', 'gridworld_stoch', 'gridworld_ep', 'cartpole', 'hex', 'blackjack', 'sl', 'dep'],
                        help='Environment to run on', default='gridworld')
    parser.add_argument('--alr', type=float, help='Actor learning rate', default=0.0005)
    parser.add_argument('--vdlr', type=float, help='Value difference learning rate', default=0.005)
    parser.add_argument('--clr', type=float, help='Critic learning rate', default=0.005)
    parser.add_argument('--clip', type=float, help='Gradient clipping argument', default=10)
    parser.add_argument('--exp', type=str, help='Exploration method', default='eps',
                        choices=['eps', 'softmax', 'bagging'])
    parser.add_argument('--exp_param', type=float, help='Parameter for exp. method', default=0.4)
    args = parser.parse_args()
#    policy = VWPolicy(actor, n_labels, lr=args.alr, exp_type=args.exp, exp_param=args.exp_param)
    vd_regressor = pyvw.vw('-l ' + str(args.vdlr), quiet=True)
    ref_critic = pyvw.vw('-l ' + str(args.clr), quiet=True)
    learner_type = 'prep'
#    learner = VwPrep(policy, actor, vd_regressor, ref_critic, learner_type)

    loss_fn = sl.HammingLoss
    # TODO what is the best value for n_epochs?
    n_epochs = 1
    warm = True
    if warm:
        macarico.util.TrainLoop(mk_env, policy, learner, optimizer, losses=[loss_fn, loss_fn, loss_fn], progress_bar=False,
                                minibatch_size=np.random.choice([1]),).train(training_data=tr, dev_data=de,
                                                                             n_epochs=n_epochs)
    # Load wsj again
    data_dir = 'bandit_data/pos/pos_wsj.mac'
    n_tr = 42000
    n_de = 0
Example from github.com/VowpalWabbit/vowpal_wabbit — python/examples/test_search_ldf.py (view on GitHub):
def __init__(self, vw, sch, num_actions):
        # The parent constructor is mandatory: it stores vw on self.vw and
        # sch on self.sch before any search-task callbacks fire.
        pyvw.SearchTask.__init__(self, vw, sch, num_actions)

        # Opt in to automatic Hamming loss, automatic conditioning
        # features, and label-dependent-features (LDF) mode.
        sch.set_options(sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES | sch.IS_LDF)
Example from github.com/VowpalWabbit/vowpal_wabbit — python/examples/test_partial_example.py (view on GitHub):
from vowpalwabbit import pyvw

vw = pyvw.vw('--audit')

# An example carrying both namespaces from the start.
complete_ex = vw.example({'a': ['b'], 'x': ['y']})
complete_ex.learn()

# Build up the same example incrementally, learning at each stage.
partial_ex = vw.example({'a': ['b']})
partial_ex.learn()

partial_ex.push_features('x', ['y'])
partial_ex.learn()

# Replace namespace 'x' with a different feature and learn once more.
partial_ex.erase_namespace(ord('x'))
partial_ex.push_features('x', ['z'])
partial_ex.learn()
Example from github.com/VowpalWabbit/vowpal_wabbit — python/examples/test_search.py (view on GitHub):
from __future__ import print_function

import sys
from vowpalwabbit import pyvw


class SequenceLabeler(pyvw.SearchTask):
    def __init__(self, vw, sch, num_actions):
        # Initializing the parent class is required; it stashes the vw
        # and sch handles on self for later use.
        pyvw.SearchTask.__init__(self, vw, sch, num_actions)

        # Program options are queryable: sch.po_exists tests presence,
        # sch.po_get returns a string, sch.po_get_int an integer.
        if sch.po_exists('search'):
            print('found --search')
            search_value = sch.po_get('search')
            print('--search value =', search_value, ', type =', type(search_value))

        # Enable automatic Hamming loss and conditioning features.
        sch.set_options(sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES)

    def _run(self, sentence):   # it's called _run to remind you that you shouldn't call it directly!
Example from github.com/VowpalWabbit/vowpal_wabbit — python/examples/test_search.py (view on GitHub):
def __init__(self, vw, sch, num_actions):
        # Parent-class init is mandatory -- it records vw on self.vw and
        # sch on self.sch before search begins.
        pyvw.SearchTask.__init__(self, vw, sch, num_actions)

        # Demonstrate program-option introspection: po_exists checks for
        # a flag; po_get / po_get_int fetch its value as str / int.
        if sch.po_exists('search'):
            print('found --search')
            opt = sch.po_get('search')
            print('--search value =', opt, ', type =', type(opt))

        # Turn on automatic Hamming loss and conditioning features.
        sch.set_options(sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES)
Example from github.com/VowpalWabbit/vowpal_wabbit — python/examples/test_search_ldf.py (view on GitHub):
(NOUN, 'monster'),
                (VERB, 'ate'),
                (DET , 'a'),
                (ADJ , 'big'),
                (NOUN, 'sandwich')],
               [(DET , 'the'),
                (NOUN, 'sandwich'),
                (VERB, 'was'),
                (ADJ , 'tasty')],
               [(NOUN, 'it'),
                (VERB, 'ate'),
                (NOUN, 'it'),
                (ADJ , 'all')] ]


class SequenceLabeler(pyvw.SearchTask):
    def __init__(self, vw, sch, num_actions):
        # Parent init is required: it saves vw/sch onto self for the
        # other search-task callbacks to use.
        pyvw.SearchTask.__init__(self, vw, sch, num_actions)

        # Automatic Hamming loss + conditioning features, in LDF mode.
        sch.set_options(sch.AUTO_HAMMING_LOSS | sch.AUTO_CONDITION_FEATURES | sch.IS_LDF)

    def makeExample(self, word, p):
        # Build one cost-sensitive LDF candidate: a single feature
        # "word_tag" in namespace 'w', labelled "tag:0" (the cost is a
        # placeholder -- presumably AUTO_HAMMING_LOSS supplies the real
        # loss; see the class's set_options call).
        feature = '{}_{}'.format(word, p)
        candidate = self.example({'w': [feature]}, labelType=self.vw.lCostSensitive)
        candidate.set_label_string('{}:0'.format(p))
        return candidate

    def _run(self, sentence):   # it's called _run to remind you that you shouldn't call it directly!
        output = []
        for n in range(len(sentence)):
Example from github.com/VowpalWabbit/vowpal_wabbit — python/examples/word_alignment.py (view on GitHub):
pred = self.sch.predict(examples  = examples,
                                    my_tag    = i+1,
                                    oracle    = oracle,
                                    condition = [ (i, 'p'), (i-1, 'q') ] )

            for ex in examples: ex.finish()

            output.append( spans[pred][2] )
            for j in spans[pred][2]:
                covered[j] = True

        return output


print('training LDF')
# LDF search learner with quadratic (-q) feature crosses between the
# e/f and e/p namespaces.
vw = pyvw.vw("--search 0 --csoaa_ldf m --search_task hook --ring_size 1024 --quiet -q ef -q ep")
task = vw.init_search_task(WordAligner)

# Ten passes over the alignment data.
for _ in range(10):
    task.learn(my_dataset)

print('====== test ======')
english = "the blue flower".split()
french = "la fleur bleue".split()
print(task.predict((english, ([], [], []), french)))
print('should have printed [[0], [2], [1]]')

vowpalwabbit

Vowpal Wabbit Python package

BSD-3-Clause
Latest version published 9 months ago

Package Health Score

83 / 100
Full package analysis

Similar packages