import numpy as np
from collections import Counter
import random
import sys
import codecs

np.random.seed(12345)

# Dataset: the Enron-Spam corpus, http://www2.aueb.gr/users/ion/data/enron-spam/
with codecs.open('spam.txt', "r", encoding='utf-8', errors='ignore') as f:
    raw = f.readlines()

vocab, spam, ham = (set(["<unk>"]), list(), list())

for row in raw:
    spam.append(set(row[:-2].split(" ")))
    for word in spam[-1]:
        vocab.add(word)

with codecs.open('ham.txt', "r", encoding='utf-8', errors='ignore') as f:
    raw = f.readlines()

for row in raw:
    ham.append(set(row[:-2].split(" ")))
    for word in ham[-1]:
        vocab.add(word)

vocab, w2i = (list(vocab), {})
for i, w in enumerate(vocab):
    w2i[w] = i

def to_indices(input, l=500):
    indices = list()
    for line in input:
        if(len(line) < l):
            # pad short lines with "<unk>"; lines of l or more words are dropped
            line = list(line) + ["<unk>"] * (l - len(line))
            idxs = list()
            for word in line:
                idxs.append(w2i[word])
            indices.append(idxs)
    return indices
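The padding scheme is easiest to see on a toy input. A quick sanity check with a made-up three-word vocabulary (demo_w2i and the length 5 are illustrative, not part of the pipeline):

# Hypothetical toy check of the padding used by to_indices
demo_w2i = {"<unk>": 0, "buy": 1, "now": 2}
line = ["buy", "now"]
padded = line + ["<unk>"] * (5 - len(line))   # pad to length 5
print([demo_w2i[w] for w in padded])          # [1, 2, 0, 0, 0]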
spam_idx = to_indices(spam)
ham_idx = to_indices(ham)

# hold out the last 1,000 messages of each class for testing
train_spam_idx = spam_idx[0:-1000]
train_ham_idx = ham_idx[0:-1000]
test_spam_idx = spam_idx[-1000:]
test_ham_idx = ham_idx[-1000:]

train_data = list()
train_target = list()
test_data = list()
test_target = list()

# interleave spam and ham so batches stay class-balanced,
# wrapping around the smaller class with the modulo index
for i in range(max(len(train_spam_idx), len(train_ham_idx))):
    train_data.append(train_spam_idx[i % len(train_spam_idx)])
    train_target.append([1])
    train_data.append(train_ham_idx[i % len(train_ham_idx)])
    train_target.append([0])

for i in range(max(len(test_spam_idx), len(test_ham_idx))):
    test_data.append(test_spam_idx[i % len(test_spam_idx)])
    test_target.append([1])
    test_data.append(test_ham_idx[i % len(test_ham_idx)])
    test_target.append([0])

# Tensor, Embedding, MSELoss and SGD are the classes of the small
# autograd framework built in the earlier parts of this series.
def train(model, input_data, target_data, batch_size=500, iterations=5):
    criterion = MSELoss()
    # the optimizer must be bound to this model's own parameters
    # (important once we start training deep copies of the model below)
    optim = SGD(parameters=model.get_parameters(), alpha=0.01)
    n_batches = int(len(input_data) / batch_size)
    for iter in range(iterations):
        iter_loss = 0
        for b_i in range(n_batches):
            # the padding token's embedding should stay at zero
            model.weight.data[w2i['<unk>']] *= 0
            input = Tensor(input_data[b_i*batch_size:(b_i+1)*batch_size],
                           autograd=True)
            target = Tensor(target_data[b_i*batch_size:(b_i+1)*batch_size],
                            autograd=True)
            pred = model.forward(input).sum(1).sigmoid()
            loss = criterion.forward(pred, target)
            loss.backward()
            optim.step()
            iter_loss += loss.data[0] / batch_size
            sys.stdout.write("\r\tLoss:" + str(iter_loss / (b_i+1)))
        print()
    return model

def test(model, test_input, test_output):
    model.weight.data[w2i['<unk>']] *= 0
    input = Tensor(test_input, autograd=True)
    target = Tensor(test_output, autograd=True)
    pred = model.forward(input).sum(1).sigmoid()
    return ((pred.data > 0.5) == target.data).mean()
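The modulo indexing above is a small oversampling trick: the shorter class wraps around so that spam and ham alternate one-for-one. The same idea as a reusable helper (a sketch; the function name is mine, not the article's):

def interleave_balanced(pos, neg):
    # oversample the smaller class by wrapping its index with modulo,
    # so positive and negative examples alternate one-for-one
    data, target = list(), list()
    for i in range(max(len(pos), len(neg))):
        data.append(pos[i % len(pos)])
        target.append([1])
        data.append(neg[i % len(neg)])
        target.append([0])
    return data, target

Calling train_data, train_target = interleave_balanced(train_spam_idx, train_ham_idx) reproduces the first loop above.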
model = Embedding(vocab_size=len(vocab), dim=1)
model.weight.data *= 0

for i in range(3):
    model = train(model, train_data, train_target, iterations=1)
    print("% Correct on Test Set: " + \
          str(test(model, test_data, test_target)*100))

______________________________________________________________________________

	Loss:0.037140416860871446
% Correct on Test Set: 98.65
	Loss:0.011258669226059114
% Correct on Test Set: 99.15
	Loss:0.008068268387986223
% Correct on Test Set: 99.45
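For readers who don't have the series' autograd framework at hand: with dim=1 the model is just one scalar score per vocabulary word, and sum(1).sigmoid() makes the prediction sigmoid(sum of the words' scores), i.e. a bag-of-words logistic classifier trained with MSE. A self-contained NumPy sketch of the same computation (the class and method names are mine, not the article's):

import numpy as np

class TinySpamModel:
    # Embedding with dim=1 + sum + sigmoid, trained with MSE and plain SGD.
    def __init__(self, vocab_size, alpha=0.01):
        self.weight = np.zeros((vocab_size, 1))   # one scalar score per word
        self.alpha = alpha

    def forward(self, idx_batch):                 # idx_batch: (batch, seq_len) ints
        z = self.weight[idx_batch].sum(axis=1)    # sum the selected rows -> (batch, 1)
        return 1.0 / (1.0 + np.exp(-z))

    def sgd_step(self, idx_batch, target):        # target: (batch, 1) of 0/1
        pred = self.forward(idx_batch)
        # MSE through the sigmoid: dL/dz = 2*(pred - target)*pred*(1 - pred)
        dz = 2 * (pred - target) * pred * (1 - pred)
        for b in range(idx_batch.shape[0]):
            # only the looked-up rows receive a gradient
            np.add.at(self.weight, idx_batch[b], -self.alpha * dz[b])
        return ((pred - target) ** 2).sum()

One pass over a batch would then be loss = model.sgd_step(np.array(train_data[:500]), np.array(train_target[:500])).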
bob = (train_data[0:1000], train_target[0:1000])
alice = (train_data[1000:2000], train_target[1000:2000])
sue = (train_data[2000:], train_target[2000:])
import copy

for i in range(3):
    print("Starting Training Round...")
    print("\tStep 1: send the model to Bob")
    bob_model = train(copy.deepcopy(model), bob[0], bob[1], iterations=1)

    print("\n\tStep 2: send the model to Alice")
    alice_model = train(copy.deepcopy(model), alice[0], alice[1], iterations=1)

    print("\n\tStep 3: Send the model to Sue")
    sue_model = train(copy.deepcopy(model), sue[0], sue[1], iterations=1)

    print("\n\tAverage Everyone's New Models")
    model.weight.data = (bob_model.weight.data + \
                         alice_model.weight.data + \
                         sue_model.weight.data) / 3

    print("\t% Correct on Test Set: " + \
          str(test(model, test_data, test_target)*100))
    print("\nRepeat!!\n")
Starting Training Round...
	Step 1: send the model to Bob
	Loss:0.21908166249699718
......
	Step 3: Send the model to Sue
	Loss:0.015368461608470256

	Average Everyone's New Models
	% Correct on Test Set: 98.8
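The averaging line is the whole of the aggregation protocol: each participant trains a local copy, and only the weights travel back to be averaged. A small helper that expresses the same step for any number of workers (a sketch, under the article's assumption that the model's only parameter is model.weight; the function name is mine):

import numpy as np

def federated_average(global_model, local_models):
    # FedAvg with equal weights: overwrite the global parameters with
    # the element-wise mean of the locally trained copies.
    stacked = np.array([m.weight.data for m in local_models])
    global_model.weight.data = stacked.mean(axis=0)
    return global_model

Calling model = federated_average(model, [bob_model, alice_model, sue_model]) reproduces the manual three-way average above.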
bobs_email = ["my", "computer", "password", "is", "pizza"]

bob_input = np.array([[w2i[x] for x in bobs_email]])
bob_target = np.array([[0]])

model = Embedding(vocab_size=len(vocab), dim=1)
model.weight.data *= 0

bobs_model = train(copy.deepcopy(model),
                   bob_input, bob_target, iterations=1, batch_size=1)
for i, v in enumerate(bobs_model.weight.data - model.weight.data):
    if(v != 0):
        print(vocab[i])
is
pizza
computer
password
my
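The leak is total: with a zero-initialized model and dim=1, the gradient of sigmoid(sum of embeddings) is nonzero only for rows that were actually looked up, so a single SGD step marks exactly the words in Bob's email. A quick check that the recovered set matches (reusing the variables above):

# every row whose weight moved corresponds to a word in Bob's email
leaked = [vocab[i] for i, v in
          enumerate(bobs_model.weight.data - model.weight.data)
          if v[0] != 0]
print(set(leaked) == set(bobs_email))   # True: the word set is fully recovered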
Source: https://habr.com/ru/post/458800/