How to use the dnn.e2emn.E2EMN class in dnn

To help you get started, we’ve selected a few dnn examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github shuyo / iir / dnn / e2emn.py View on Github external
# CLI setup for training an End-to-End Memory Network (E2EMN) on the bAbI tasks.
# NOTE(review): this fragment is cut off at both ends — `parser` is created and
# the training loop continues outside the visible lines; indentation of the
# first line was lost in extraction.
parser.add_argument('-t', '--target', help='target data', default="tasks_1-20_v1-2/en/qa1_single-supporting-fact")
    parser.add_argument('--adam', help='use Adam optimizer', action="store_true")
    parser.add_argument('--pe', help='use Position Encoding', action="store_true")
    parser.add_argument('--ls', help='use Linear Start', action="store_true")
    parser.add_argument('--rn', help='use Random Noise', action="store_true")
    parser.add_argument("-g", "--gpu", default=-1, type=int, help="GPU ID (negative = CPU)")
    args = parser.parse_args()
    print(args)

    # Load the train/test splits of the selected bAbI task; the loader also
    # builds the shared vocabularies (corpus.vocab / corpus.vocab_a).
    corpus = CorpusLoader()
    train_data = corpus.load(args.target+"_train.txt", device=args.gpu)
    valid_data = corpus.load(args.target+"_test.txt", device=args.gpu)
    print("knowledge=%d, query=%d, vocab=%d, answer=%d" %
        (train_data.ksize, len(train_data), len(corpus.vocab), len(corpus.vocab_a)))

    # Build the model; 100 is the max number of knowledge sentences per story
    # (see max_knowledge in E2EMN.__init__). pe/rn toggle Position Encoding
    # and Random Noise variants.
    model = E2EMN(args.layer, args.dim, len(corpus.vocab), len(corpus.vocab_a), 100, pe=args.pe, rn=args.rn)
    xp = numpy
    if args.gpu >= 0:
        # Move parameters to the selected GPU and switch the array module to cupy.
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()
        xp = chainer.cuda.cupy

    # Optimizer choice: Adam (default hyper-parameters) or plain SGD with lr=0.01.
    if args.adam:
        optimizer = chainer.optimizers.Adam()
    else:
        optimizer = chainer.optimizers.SGD(0.01)
    optimizer.setup(model)

    # Training-loop state (loop body not visible in this fragment):
    # wall-clock start, Linear Start flag, best validation loss so far,
    # and a counter for consecutive validation-loss increases.
    t0 = time.time()
    linear_start = args.ls
    min_valid_loss = 1e9
    loss_raise_count = 0
github shuyo / iir / dnn / e2emn.py View on Github external
def __init__(self, layer, D, vocab, vocab_ans, max_knowledge, pe=False, rn=False):
        """Build parameters for an End-to-End Memory Network.

        Args:
            layer: number of layers (memory hops — presumably; confirm
                against the forward pass, which is outside this view).
            D: embedding dimension.
            vocab: input vocabulary size (rows of the embedding matrices).
            vocab_ans: answer vocabulary size (output classes of W).
            max_knowledge: maximum number of knowledge sentences; sizes the
                temporal-encoding parameter matrices.
            pe: enable Position Encoding.
            rn: enable Random Noise.
        """
        super(E2EMN, self).__init__()
        self.layer = layer
        self.V = vocab
        self.pe = pe # Position Encoding
        self.rn = rn # Random Noise

        # All parameters initialized from N(0, 0.1), following the MemN2N paper's setup.
        initializer = chainer.initializers.Normal(0.1)
        with self.init_scope():
            # Embedding matrices A, B, C: A/C embed knowledge sentences,
            # B embeds the query (names follow the MemN2N convention).
            self.embedid_a = chainer.Parameter(initializer, (vocab, D))
            self.embedid_b = chainer.Parameter(initializer, (vocab, D))
            self.embedid_c = chainer.Parameter(initializer, (vocab, D))
            # Final projection from the D-dim state to answer-vocabulary logits.
            self.W = L.Linear(D, vocab_ans, initialW=initializer)
            # Temporal encoding: one D-dim vector per knowledge-sentence slot.
            self.temporal_a = chainer.Parameter(initializer, (max_knowledge, D))
            self.temporal_c = chainer.Parameter(initializer, (max_knowledge, D))
            if layer > 1:
                # Linear map H applied between hops (only needed for multi-layer models).
                self.H = L.Linear(D, D, initialW=initializer)