How to use the funcy.pluck function in funcy

To help you get started, we’ve selected a few funcy.pluck examples, drawn from popular ways it is used in public projects.
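
At its core, pluck(key, mappings) maps an item lookup over a sequence, yielding the value at key for each element. Here is a minimal sketch with invented sample data; note that in recent funcy versions pluck is lazy, and lpluck is the eager variant that returns a list:

from funcy import pluck, lpluck

papers = [
    {'id': 1, 'content': 'attention is all you need', 'label': 1},
    {'id': 2, 'content': 'a survey of graph kernels', 'label': 0},
]

print(list(pluck('label', papers)))  # [1, 0] -- pluck is lazy, so materialize it
print(lpluck('id', papers))          # [1, 2] -- lpluck returns a list directly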

github amauboussin / arxiv-twitterbot / conv_net.py
"""
        embedding_size = model_options.get('embedding_size', 128)
        filter_sizes = model_options.get('filter_sizes', [2, 3, 4])
        n_filters = model_options.get('n_filters', 25)
        pool_size = model_options.get('pool_size', 4)
        hidden_dims = model_options.get('hidden_dims', 128)
        dropout_prob = model_options.get('dropout_prob', .5)
        conv_l2 = model_options.get('conv_l2', .05)
        fc_l2 = model_options.get('fc_l2', .05)
        balance_classes = model_options.get('balance_classes', False)

        self.train_labels = pluck('label', train)
        self.x_train, self.x_test = pluck('content', train), pluck('content', test)
        self.y_train, self.y_test = pluck('label', train), pluck('label', test)

        self.train_ids = pluck('id', train)
        self.test_ids = pluck('id', test)

        self.transform = DocToWordIndices().fit(self.x_train)
        self.x_train = self.transform.transform(self.x_train)
        self.x_test = self.transform.transform(self.x_test)

        self.vocab_size = np.max(self.x_train) + 1  # vocab and classes are 0 indexed
        self.n_labels = int(np.max(self.y_train)) + 1
        self.y_train, self.y_test = to_categorical(self.y_train), to_categorical(self.y_test)

        self.sequence_length = self.x_train.shape[1]
        self.n_labels = self.y_train.shape[1]
        self.balance_classes = balance_classes

        conv_input = Input(shape=(self.sequence_length, embedding_size))
        convs = []
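
Here each pluck call pulls one field ('content', 'label', 'id') out of every example dict, building the parallel train/test sequences in a single line apiece. The surrounding NumPy calls suggest this project uses a funcy version where pluck returned a list; with the current lazy pluck you would reach for lpluck instead.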
github OpenMTC / OpenMTC / server / openmtc-cse / src / openmtc_cse / plugins / transport_gevent_http / wsgi.py
def _get_addresses(self, family):
        try:
            return self.__cached_addresses[family]
        except KeyError:
            from netifaces import interfaces, ifaddresses

            addresses = self.__cached_addresses[family] = set()

            for interface in interfaces():
                try:
                    ifdata = ifaddresses(interface)[family]
                    ifaddrs = [addr.split("%")[0]
                               for addr in pluck("addr", ifdata)]
                    addresses.update(ifaddrs)
                except KeyError:
                    pass

            return addresses
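
In this snippet, pluck("addr", ifdata) extracts the addr string from each address dict that netifaces reports for an interface/family pair, and the comprehension strips any IPv6 zone suffix (the part after "%") before the addresses are cached.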
github mozilla / dxr / dxr / plugins / needles.py
def group_needles(line_needles):
    """Group line needles by line. [(_, line)] -> [[_]]."""
    grouped_needles = sorted(group_by(itemgetter(1), line_needles).items(),
                             key=itemgetter(0))
    return [list(map(itemgetter(0), ndl)) for ndl in pluck(1, grouped_needles)]
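
Since group_by returns a mapping from line to its needles, pluck(1, grouped_needles) pulls the needle list out of each sorted (line, needles) pair. The key passed to pluck can be any valid index, not just a dict key; a small sketch with invented tuples:

from funcy import pluck

pairs = [('kind', 10), ('name', 10), ('type', 12)]
print(list(pluck(0, pairs)))  # ['kind', 'name', 'type']
print(list(pluck(1, pairs)))  # [10, 10, 12]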
github mvcisback / py-metric-temporal-logic / mtl / fastboolean_eval.py
def get_times(x, tau, lo, hi):
    end = min(v.last_key() for v in x.values())

    lo, hi = map(float, (lo, hi))
    hi = hi + tau if hi + tau <= end else end
    lo = lo + tau if lo + tau <= end else end

    if lo > hi:
        return []
    elif hi == lo:
        return [lo]

    all_times = fn.cat(v.slice(lo, hi).items() for v in x.values())
    return sorted(set(fn.pluck(0, all_times)))
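
The items of each sliced signal are (time, value) pairs, so fn.pluck(0, all_times) keeps just the timestamps, which are then de-duplicated and sorted.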
github YumaInaura / YumaInaura / api / lib / twitter / add-hashtag.py
#!/usr/bin/env python3

import sys, json, os, re 
from funcy import pluck

seeds = json.loads(sys.stdin.read())

tags_file = sys.argv[1]
with open(tags_file, "r") as f:
    tags = json.loads(f.read())

dictionary_json_key = os.environ.get('DICTIONARY_JSON_KEY') or "text"
json_key = os.environ.get('ADD_HASHTAG_JSON_KEY') or "text"

regexp_or = '|'.join(list(pluck(dictionary_json_key, tags)))

regex_pattern = r'(\b(?
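
Here pluck(dictionary_json_key, tags) extracts the configured text field from every tag entry so the values can be joined into a single alternation pattern for matching.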
github uploadcare / intercom-rank / app / intercom / service.py
def iter_data(data):
            if force:
                return iter(data)

            # Filter notes for excluding duplicates
            exist_notes = self.get_notes(pluck('user_id', data))

            for row in data:
                user_id, body = str(row['user_id']), row['body']

                if user_id not in exist_notes:
                    yield row
                    continue

                bodies = map(normalize_note,
                             pluck('body', exist_notes[user_id]))
                if normalize_note(body) not in bodies:
                    yield row
                    continue

                logger.debug(
                    'The note with this body already exists: %r', row)
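
Both calls feed pluck straight into other functions: pluck('user_id', data) supplies the ids for the duplicate-note lookup, and pluck('body', exist_notes[user_id]) yields each stored note body for normalization before comparing.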
github YumaInaura / YumaInaura / api / lib / github / edit-issue.py
  edit_api_url = 'https://api.github.com/repos/%s/%s/issues/%s' % (owner, repository, issue_number)

  update = {}

  if 'title' in edit:
    update['title'] = edit['title']

  if 'body' in edit:
    update['body'] = edit['body']

  if 'labels' in edit:
    update['labels'] = edit['labels']

  if 'remove_labels' in edit:
    issue = get_issue(edit)
    label_names = list(funcy.pluck('name', issue['labels']))

    update['labels'] = list(set(label_names) - set(edit['remove_labels']))

  res = session.post(edit_api_url, json.dumps(update))

  results.append(res.json())
     
print(json.dumps(results))
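
funcy.pluck('name', issue['labels']) extracts the name string from each label object in the GitHub issue payload, so removing labels reduces to set arithmetic on plain strings.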
github remorses / mongoke / generated / generated / resolvers / support.py
def zip_pluck(d, *keys):
    return zip(*[pluck(k, d) for k in keys])
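
zip_pluck plucks several keys at once and zips the results back together, yielding one tuple per record. A usage sketch with invented rows:

rows = [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]

for id_, name in zip_pluck(rows, 'id', 'name'):
    print(id_, name)
# 1 a
# 2 b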