# email_preprocess.py
#!/usr/bin/python
import pickle
import cPickle

import numpy

from sklearn import model_selection  # formerly sklearn.cross_validation
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectPercentile, f_classif
  8. def preprocess(words_file = "../tools/word_data.pkl", authors_file="../tools/email_authors.pkl"):
  9. """
  10. this function takes a pre-made list of email texts (by default word_data.pkl)
  11. and the corresponding authors (by default email_authors.pkl) and performs
  12. a number of preprocessing steps:
  13. -- splits into training/testing sets (10% testing)
  14. -- vectorizes into tfidf matrix
  15. -- selects/keeps most helpful features
  16. after this, the feaures and labels are put into numpy arrays, which play nice with sklearn functions
  17. 4 objects are returned:
  18. -- training/testing features
  19. -- training/testing labels
  20. """
  21. ### the words (features) and authors (labels), already largely preprocessed
  22. ### this preprocessing will be repeated in the text learning mini-project
  23. authors_file_handler = open(authors_file, "r")
  24. authors = pickle.load(authors_file_handler)
  25. authors_file_handler.close()
  26. words_file_handler = open(words_file, "r")
  27. word_data = cPickle.load(words_file_handler)
  28. words_file_handler.close()
  29. ### test_size is the percentage of events assigned to the test set
  30. ### (remainder go into training)
  31. features_train, features_test, labels_train, labels_test = model_selection.train_test_split(word_data, authors, test_size=0.1, random_state=42)
  32. ### text vectorization--go from strings to lists of numbers
  33. vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
  34. stop_words='english')
  35. features_train_transformed = vectorizer.fit_transform(features_train)
  36. features_test_transformed = vectorizer.transform(features_test)
  37. ### feature selection, because text is super high dimensional and
  38. ### can be really computationally chewy as a result
  39. selector = SelectPercentile(f_classif, percentile=1)
  40. selector.fit(features_train_transformed, labels_train)
  41. features_train_transformed = selector.transform(features_train_transformed).toarray()
  42. features_test_transformed = selector.transform(features_test_transformed).toarray()
  43. ### info on the data
  44. print "no. of Chris training emails:", sum(labels_train)
  45. print "no. of Sara training emails:", len(labels_train)-sum(labels_train)
  46. return features_train_transformed, features_test_transformed, labels_train, labels_test