Say that I have 100 tweets.
In those tweets, I need to extract: 1) food names, and 2) beverage names.
Example of a tweet:
"Yesterday I had a coca cola, and a hot dog for lunch, and some bana split for desert. I liked the coke, but the banana in the banana split dessert was ripe"
I have at my disposal two lexicons: one with food names, and one with beverage names.
Example entries in the food names lexicon:
"hot dog"
"banana"
"banana split"
Example entries in the beverage names lexicon:
"coke"
"cola"
"coca cola"
What I should be able to extract:
[[["coca cola", "beverage"], ["hot dog", "food"], ["banana split", "food"]],
[["coke", "beverage"], ["banana", "food"], ["banana split", "food"]]]
The names in the lexicons can be 1-5 words long. How do I go about extracting n-grams from the tweets using my lexicons?
Here is a simple solution:
import re

def lexicon_by_word(lexicons):
    # Map every lexicon entry to its category, e.g. "hot dog" -> "food"
    return {word: key for key in lexicons.keys() for word in lexicons[key]}

def split_sentences(st):
    sentences = re.split(r'[.?!]\s*', st)
    if sentences[-1]:
        return sentences
    else:
        return sentences[:-1]  # drop the empty string left by a trailing terminator

def ngrams_finder(lexicons, text):
    lexicons_by_word = lexicon_by_word(lexicons)

    def pattern(lexicons):
        # Longest entries first: regex alternation takes the first branch
        # that matches, so "banana split" must come before "banana".
        alternatives = sorted(lexicons_by_word.keys(), key=len, reverse=True)
        return re.compile("|".join(map(re.escape, alternatives)))

    pattern = pattern(lexicons)
    ngrams = []
    for sentence in split_sentences(text):
        ngram = []
        # findall returns [] when nothing matches, so no try/except is needed
        for result in pattern.findall(sentence):
            ngram.append([result, lexicons_by_word[result]])
        ngrams.append(ngram)
    return ngrams
# Example usage, which you can customize with your own lexicons:
text = "Yesterday I had a coca cola, and a hot dog for lunch, and some bana split for desert. I liked the coke, but the banana in the banana split dessert was ripe"
lexicons = {
    "food": ["hot dog", "banana", "banana split"],
    "beverage": ["coke", "cola", "coca cola"],
}
print(ngrams_finder(lexicons, text))
The split_sentences function is taken from here: Splitting a sentence by ending characters
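One refinement worth considering (my addition, not part of the answer above): anchoring the alternatives with \b word boundaries, so that a short entry such as "cola" cannot match inside an unrelated longer word like "percolator". A minimal sketch, reusing the lexicons_by_word mapping built above; build_pattern is a hypothetical helper name:

def build_pattern(lexicons_by_word):
    # Longest entries first, so "banana split" is tried before "banana";
    # re.escape guards against regex metacharacters in lexicon entries;
    # \b stops "cola" from matching inside words like "percolator".
    alternatives = sorted(lexicons_by_word, key=len, reverse=True)
    return re.compile(r"\b(?:" + "|".join(map(re.escape, alternatives)) + r")\b")

Swapping this in for the inner pattern function in ngrams_finder leaves the rest of the code unchanged.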
Not sure what you have tried so far; below is a solution using ngrams from nltk and a plain dict().
from nltk import ngrams

tweet = "Yesterday I had a coca cola, and a hot dog for lunch, and some bana split for desert. I liked the coke, but the banana in the banana split dessert was ripe"

# Your lexicons
lexicon_food = ["hot dog", "banana", "banana split"]
lexicon_beverage = ["coke", "cola", "coca cola"]

# Map each entry to [entry, category] for direct lookup
lexicon_dict = {x: [x, 'Food'] for x in lexicon_food}
lexicon_dict.update({x: [x, 'Beverage'] for x in lexicon_beverage})

# Function to extract lexicon items from a bigram
def extract(g, lex):
    if ' '.join(g) in lex:    # the whole bigram is a lexicon entry
        return lex.get(' '.join(g))
    elif g[0] in lex:         # otherwise try the bigram's first word
        return lex.get(g[0])

# Your task: look up every bigram of every sentence
out = [[extract(g, lexicon_dict) for g in ngrams(sentence.split(), 2) if extract(g, lexicon_dict)]
       for sentence in tweet.replace(',', '').lower().split('.')]
print(out)
Output:
[[['coca cola', 'Beverage'], ['cola', 'Beverage'], ['hot dog', 'Food']],
[['coke', 'Beverage'], ['banana', 'Food'], ['banana split', 'Food']]]
Approach 2 (avoids matching both "coca cola" and the nested "cola")
The first approach checks every bigram independently, so after ("coca", "cola") matches "coca cola", the next bigram ("cola", "and") still matches "cola" through its first word. The greedy scan below consumes both words of a two-word match and continues after them:
def extract2(sentence, lex):
    extracted_words = []
    words = sentence.split()
    i = 0
    while i < len(words):
        if ' '.join(words[i:i+2]) in lex:    # two-word entry: consume both words
            extracted_words.append(lex.get(' '.join(words[i:i+2])))
            i += 2
        elif words[i] in lex:                # single-word entry
            extracted_words.append(lex.get(words[i]))
            i += 1
        else:
            i += 1
    return extracted_words
out = [extract2(s, lexicon_dict) for s in tweet.replace(',', '').lower().split('.')]
print(out)
Output:
[[['coca cola', 'Beverage'], ['hot dog', 'Food']],
[['coke', 'Beverage'], ['banana', 'Food'], ['banana split', 'Food']]]
Note that nltk is not needed here; approach 2 uses only plain string operations.
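The question says lexicon entries can be one to five words long, while extract2 only tries two-word windows. Below is a hedged sketch (my addition, not from either answer) generalizing the same greedy scan to try the longest window first at each position; extract_upto and max_len are made-up names for illustration:

def extract_upto(sentence, lex, max_len=5):
    # Greedy longest-match scan: at each position, try the longest
    # window first so "banana split" beats "banana".
    extracted = []
    words = sentence.split()
    i = 0
    while i < len(words):
        for n in range(min(max_len, len(words) - i), 0, -1):
            candidate = ' '.join(words[i:i + n])
            if candidate in lex:
                extracted.append(lex[candidate])
                i += n   # consume the matched words
                break
        else:  # no lexicon entry starts at words[i]
            i += 1
    return extracted

out = [extract_upto(s, lexicon_dict) for s in tweet.replace(',', '').lower().split('.')]
print(out)

On the example tweet this should give the same output as approach 2, while also supporting entries up to five words and catching an entry that sits at the very end of a sentence, which the bigram-based version can miss.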