"""Environment sanity check.

Run this file to make sure all the packages in your environment are set up
correctly.  Also, fill out the name/email fields below.

Every statement here is a deliberate module-level side effect: the prints
identify the student, the imports verify each package is installed, and the
``nltk.download`` calls fetch the standard lexica used later in the course.
"""
print("My Name Is: <YOUR NAME>")
print("My Email Is: <YOUR EMAIL>")

# Installed via: pip install matplotlib numpy pandas
# Matplotlib Graphing Library
import matplotlib
import matplotlib.pyplot as plt
# Numpy for fast numeric computation
import numpy as np
# Pandas for R-like DataFrames
import pandas as pd

# Installed via: pip install requests
# Library for issuing human-readable requests to HTTP servers
import requests

# Installed via: pip install facebook-sdk praw tweepy
# Facebook-SDK Library
import facebook
# Reddit's Python API Wrapper
import praw
# Twitter + Python API Wrapper
import tweepy

# Installed via: conda install -c conda-forge basemap basemap-data-hires
from mpl_toolkits.basemap import Basemap

# Installed via: pip install scipy scikit-learn
# NOTE: the pip package name is "scikit-learn"; the old "sklearn" alias on
# PyPI is deprecated and refuses to install.
import sklearn.cluster
import sklearn.feature_extraction
import sklearn.feature_extraction.text
import sklearn.metrics
import sklearn.preprocessing

# Installed via: pip install nltk textblob gensim
# NLTK, the Natural Language Toolkit
import nltk
import nltk.sentiment.util
import nltk.sentiment.vader
from nltk.corpus import stopwords
from nltk.tokenize import TweetTokenizer
# Go ahead and download standard lexica (requires network access)
nltk.download("stopwords") # For removing "and", "or", "the", etc.
nltk.download("vader_lexicon") # For Twitter-centric sentiment analysis

# High-level Package for Text Analysis, relies on NLTK
from textblob import TextBlob

# Fast library for topic modeling
from gensim import matutils
from gensim.models import ldamulticore
from gensim.corpora import Dictionary
from gensim.models import TfidfModel
from gensim.models import atmodel
from gensim.models.phrases import Phrases

# Installed via: pip install networkx
import networkx as nx