# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_discrete_sentiment(self):
    """NaiveBayesAnalyzer reports a discrete classification label.

    A clearly positive sentence must classify as 'pos'.
    """
    text_blob = tb.TextBlob("I feel great today.", analyzer=NaiveBayesAnalyzer())
    # sentiment[0] is the classification field of the analyzer's result.
    assert_equal(text_blob.sentiment[0], 'pos')
def test_override_analyzer(self):
    """A Blobber hands the same analyzer instance to every blob it builds."""
    blobber = tb.Blobber(analyzer=NaiveBayesAnalyzer())
    first = blobber("How now?")
    second = blobber("Brown cow")
    assert_true(isinstance(first.analyzer, NaiveBayesAnalyzer))
    # Identity, not equality: both blobs must share one analyzer object.
    assert_true(first.analyzer is second.analyzer)
def setUp(self):
    """Give each test case a fresh NaiveBayes sentiment analyzer."""
    self.analyzer = NaiveBayesAnalyzer()
def test_can_use_different_sentanalyzer(self):
    """TextBlob accepts a non-default analyzer at construction time."""
    sample = tb.TextBlob("I love this car", analyzer=NaiveBayesAnalyzer())
    assert_true(isinstance(sample.analyzer, NaiveBayesAnalyzer))
def test_can_get_subjectivity_and_polarity_with_different_analyzer(self):
    """polarity/subjectivity come from PatternAnalyzer even on a NaiveBayes blob.

    The blob is constructed with NaiveBayesAnalyzer, but the ``polarity`` and
    ``subjectivity`` properties must still match a direct PatternAnalyzer run
    on the same text.
    """
    blob = tb.TextBlob("I love this car.", analyzer=NaiveBayesAnalyzer())
    # Analyze once and reuse the result (the original ran analyze() twice).
    expected = PatternAnalyzer().analyze(str(blob))
    assert_equal(blob.polarity, expected[0])
    assert_equal(blob.subjectivity, expected[1])
#######################
# Aggregate sentiment statistics over every row of ``csv_file``.
# Expects a CSV with at least 'title' and 'id' columns; relies on the
# module-level names ``csv_file`` (path) and ``words`` (list) defined elsewhere.
polar = 0               # running sum of pattern-analyzer polarity
subj = 0                # running sum of pattern-analyzer subjectivity
count = 0               # total rows read
pos = 0                 # running sum of NaiveBayes p_pos
neg = 0                 # running sum of NaiveBayes p_neg
countForSentiment = 0   # rows where the pattern analyzer found any signal
print("CSV_FILE_NAME:" + csv_file)
# ``with`` guarantees the handle is closed; the original leaked the open file.
with open(csv_file) as handle:
    for t in csv.DictReader(handle, delimiter=','):
        count += 1
        # Python 3 str is already unicode, so the old .decode('utf-8') is gone.
        into = str(t['title'])
        blob = TextBlob(into, analyzer=NaiveBayesAnalyzer())
        print("positivity:", blob.sentiment.p_pos)
        print("negativity:", blob.sentiment.p_neg)
        pos += blob.sentiment.p_pos
        neg += blob.sentiment.p_neg
        test = TextBlob(into)
        # Only count rows that are not completely neutral under the default analyzer.
        if test.sentiment.polarity != 0 or test.sentiment.subjectivity != 0:
            polar += test.sentiment.polarity
            subj += test.sentiment.subjectivity
            print("subjectivity:", test.sentiment.subjectivity)
            print("polarity:", test.sentiment.polarity)
            countForSentiment += 1
        # NOTE(review): the flattened source makes the nesting of this print
        # ambiguous; placed at loop level alongside words.extend — confirm.
        print(str(t['id']) + "\t" + str(test.sentiment.subjectivity) + "\t" + str(test.sentiment.polarity))
        words.extend(t['title'].lower().split())
# -*- coding: utf-8 -*-
import re
from nltk import RegexpParser
from textblob import TextBlob, Word
from textblob.taggers import PatternTagger
from textblob.sentiments import NaiveBayesAnalyzer
# Shared module-level instances: one POS tagger and one sentiment analyzer,
# constructed once and reused rather than rebuilt per call.
pattern_tagger = PatternTagger()
naive_bayes_analyzer = NaiveBayesAnalyzer()
# English stopword list used to filter out non-content tokens.
# NOTE(review): the list is truncated in this excerpt (entries only through
# "concerning", no closing bracket visible) — the remainder lives outside
# this view; do not edit the list boundaries here.
stopwords = [
"a", "a's", "able", "about", "above", "according", "accordingly", "across",
"actually", "after", "afterwards", "again", "against", "ain't", "all",
"allow", "allows", "almost", "alone", "along", "already", "also",
"although", "always", "am", "among", "amongst", "an", "and", "another",
"any", "anybody", "anyhow", "anyone", "anything", "anyway", "anyways",
"anywhere", "apart", "appear", "appreciate", "appropriate", "are",
"aren't", "around", "as", "aside", "ask", "asking", "associated", "at",
"available", "away", "awfully", "b", "be", "became", "because", "become",
"becomes", "becoming", "been", "before", "beforehand", "behind", "being",
"believe", "below", "beside", "besides", "best", "better", "between",
"beyond", "both", "brief", "but", "by", "c", "c'mon", "c's", "came", "can",
"can't", "cannot", "cant", "cause", "causes", "certain", "certainly",
"changes", "clearly", "co", "com", "come", "comes", "concerning",
def on_status(self, data):
    """Tweepy callback invoked for each newly received tweet.

    Retweets (text containing "RT") are skipped. For everything else the
    tweet is classified with NaiveBayesAnalyzer and one comma-separated
    line "p_pos,p_neg,lang,location" is printed and flushed to stdout.
    """
    tweet = self.clean(data.text)
    if "RT" not in tweet:
        # Fall back to the literal string "None" when metadata is absent.
        location = self.clean(data.user.location) if data.user.location else "None"
        lang = data.lang if data.lang else "None"
        blob = tb(tweet, analyzer=NaiveBayesAnalyzer())
        print(str(blob.sentiment.p_pos) + "," +
              str(blob.sentiment.p_neg) + "," +
              lang + "," +
              location)
        # Flush so downstream pipes see each tweet immediately.
        sys.stdout.flush()
    # NOTE(review): flattened indentation makes the original nesting of this
    # return ambiguous; placed at method level (usual tweepy practice) — confirm.
    return True
# --- Instagram comment moderation (legacy Python 2 fragment) ---
# Fetches the comments on the media item resolved by get_post_id(), scores
# each with NaiveBayesAnalyzer, and issues a DELETE for any comment whose
# p_neg exceeds p_pos. Relies on module-level BASE_URL, APP_ACCESS_TOKEN,
# insta_username, total_comments, negative_comments, positive_comments.
# NOTE(review): the fragment's indentation is flattened and the final `else:`
# has no visible body — the real nesting must be confirmed against the
# original file before any logic change.
media_id = get_post_id(insta_username)
request_url = (BASE_URL + 'media/%s/comments/?access_token=%s') % (media_id, APP_ACCESS_TOKEN)
print 'GET request url : %s' % (request_url)
comment_info = requests.get(request_url).json()
# The API reports its real status inside the JSON payload (meta.code).
if comment_info['meta']['code'] == 200:
if len(comment_info['data']):
#Here's a naive implementation of how to delete the negative comments :)
for x in range(0, len(comment_info['data'])):
comment_id = comment_info['data'][x]['id']
comment_text = comment_info['data'][x]['text']
total_comments.append(comment_text) #adding total number of comments to the list
blob = TextBlob(comment_text, analyzer=NaiveBayesAnalyzer())
# A comment is treated as negative when the classifier leans negative overall.
if (blob.sentiment.p_neg > blob.sentiment.p_pos):
negative_comments.append(comment_text) # adding negative comments
print Fore.RED+Style.BRIGHT+'Negative comment : %s' % (comment_text)
delete_url = (BASE_URL + 'media/%s/comments/%s/?access_token=%s') % (media_id, comment_id, APP_ACCESS_TOKEN)
print 'DELETE request url : %s' % (delete_url)
delete_info = requests.delete(delete_url).json()
if delete_info['meta']['code'] == 200:
print 'Comment successfully deleted!\n'
else:
print 'Unable to delete comment!'
else:
positive_comments.append(comment_text) #adding positive comments
print Fore.BLUE+Style.BRIGHT+'Positive comment : %s\n' % (comment_text)
else: