Section #8 Solutions

November 16th, 2020


Written by Brahm Capoor, Parth Sarin, Juliette Woodrow and Peter Maldonado


Finding Grandparents

      
# Maps each parent to the list of that parent's children.
# Used by find_grandchildren's doctest below.
SAMPLE_INPUT = {
    'Khaled': ['Chibundu', 'Jesmyn'],
    'Daniel': ['Khaled', 'Eve'],
    'Jesmyn': ['Frank'],
    'Eve': ['Grace']
}


def add_grandchildren(grandparents_dictionary, grandparent, new_grandchildren):
    """
    Record new_grandchildren under grandparent in grandparents_dictionary.

    Creates the grandparent's entry (an empty list) on first use, then
    appends the new grandchildren to it. Mutates the dictionary in place
    and returns None.
    """
    # setdefault inserts [] the first time we see this grandparent and
    # returns whichever list is stored, so we can extend it in place.
    grandparents_dictionary.setdefault(grandparent, []).extend(new_grandchildren)


def find_grandchildren(parents_dictionary):
    """
    Build a mapping from grandparent to list of grandchildren, given a
    mapping from parent to list of children.

    >>> find_grandchildren(SAMPLE_INPUT)
    {'Khaled': ['Frank'], 'Daniel': ['Chibundu', 'Jesmyn', 'Grace']}
    """
    grandparents = {}
    for parent, children in parents_dictionary.items():
        for child in children:
            # A grandchild exists only when this child is a parent too;
            # that child's children are `parent`'s grandchildren.
            if child in parents_dictionary:
                add_grandchildren(grandparents, parent,
                                  parents_dictionary[child])
    return grandparents
      
    

Contact Tracing

      
# Person whose contacts we want to trace.
TARGET = 'Rosalind'
# File of "name, time, location" records covering everyone's day.
LOCATION_FILE = 'locations.txt'


def create_paths_dict(location_file):
    """
    Processes the location file and returns a dictionary containing everyone's
    movements throughout the day, shaped as {name: {time: location}}.
    """
    paths = {}

    with open(location_file) as f:
        for line in f:
            # Each record looks like "Name, time, location".
            parts = line.strip().split(', ')
            # setdefault gives this person a fresh inner dict on first sight.
            paths.setdefault(parts[0], {})[parts[1]] = parts[2]

    return paths


def paths_intersected(person_one, person_two):
    """
    Returns whether the paths of person one and person two ever intersected
    throughout the day.

    Arguments
    ---------
    person_one (dict) -- Maps time -> location for the first person.
    person_two (dict) -- Maps time -> location for the second person.

    Fix: the original indexed person_two[time] directly, which raised
    KeyError whenever person_two had no record at one of person_one's
    timestamps. .get() returns None in that case, which never equals a
    location string, so missing timestamps simply don't count as contact.
    """
    for time, location in person_one.items():
        if person_two.get(time) == location:
            return True

    return False


def find_contacts(target, location_file):
    """
    Computes and returns a list of people that the target came in contact with
    by processing the location file.

    Note that the target's own path intersects itself, so the target appears
    in the returned list as well.

    Arguments
    ---------
    target (str) -- The name of the person to compare everyone against.
    location_file (str) -- The name of the file that contains the location
        information for people throughout the day.
    """
    all_paths = create_paths_dict(location_file)
    target_path = all_paths[target]

    # Keep everyone whose day crossed the target's day at some point.
    return [person
            for person, path in all_paths.items()
            if paths_intersected(path, target_path)]


def main():
    # Print everyone whose path crossed TARGET's during the day.
    print(find_contacts(TARGET, LOCATION_FILE))


if __name__ == '__main__':
    main()
      
    

ASCII Art

      
# Characters from darkest to lightest, and the brightness thresholds that
# separate them (brightness below BREAKPOINTS[i] maps to IMG_CHARS[i]).
IMG_CHARS = '▓▒░ '
BREAKPOINTS = (64, 128, 192)


def convert_pixel_to_ascii(pixel):
    """
    Converts a single pixel to ASCII.

    Arguments
    ---------
    pixel (simpleimage.Pixel) -- The pixel to be converted to ASCII.
    """
    brightness = (pixel.red + pixel.blue + pixel.green) / 3

    # Walk the thresholds in order; the first one that exceeds the average
    # brightness picks the character. (See the bisect library for a more
    # efficient way to do this, if interested.)
    for threshold, char in zip(BREAKPOINTS, IMG_CHARS):
        if brightness < threshold:
            return char
    # Brighter than every breakpoint: lightest character.
    return IMG_CHARS[-1]



def convert_img_to_ascii(img_file):
    """
    Converts an image to ASCII characters stored in IMG_CHARS based on the
    brightness breakpoints in BREAKPOINTS and prints out the resulting ascii.

    Arguments
    ---------
    img_file (str) -- Filename of the image to render.

    Fix: removed the unused `img_ascii` list (it was created but never
    appended to or returned), and built each row with str.join instead of
    repeated `+=` concatenation.
    """
    img = SimpleImage(img_file)
    # resize_img_for_ascii(img) # If you'd like to use your own image.

    # Print one line of ASCII per row of pixels.
    for y in range(img.height):
        row = "".join(convert_pixel_to_ascii(img.get_pixel(x, y))
                      for x in range(img.width))
        print(row)
      
    

Sentiment Analysis

      
# The emotion whose lexicon entries we keep.
EMOTION = 'surprise'

def make_emotion_dict(filename):
    """
    Build a dictionary mapping word -> intensity score for EMOTION.

    Each lexicon line is expected to look like "word emotion score".

    Arguments
    ---------
    filename (str) -- The emotion lexicon file to read.

    Fixes: the file handle was never closed (now uses `with`), and blank or
    short lines raised IndexError (now skipped).
    """
    emotion_dict = {}
    with open(filename) as f:
        for line in f:
            splits = line.split()

            # Skip blank or malformed lines that lack word/emotion/score.
            if len(splits) < 3:
                continue

            if splits[1] == EMOTION:
                curr_word = splits[0]
                value = int(splits[2])
                emotion_dict[curr_word] = value

    return emotion_dict

def read_file(filename, emotion_dict, stoplist):
    """
    Return (top_tweet, top_score): the line of filename that scores highest
    for the emotion after stop words are removed, and that score.

    Ties go to the last tied line because of the >= comparison below.

    Arguments
    ---------
    filename (str) -- File with one tweet per line.
    emotion_dict (dict) -- Maps word -> emotion intensity score.
    stoplist -- Collection of lowercase words to ignore when scoring.

    Fix: the file handle was never closed; `with` guarantees cleanup.
    """
    top_tweet = ""
    top_score = -1
    with open(filename) as f:
        for line in f:
            splits = line.split()
            shortened_tweet = remove_stop_words(splits, stoplist)
            score = sum_tweet(shortened_tweet, emotion_dict)
            # >= keeps the original tie-breaking: later lines win ties.
            if score >= top_score:
                top_score = score
                top_tweet = line

    return top_tweet, top_score

def remove_stop_words(line, stoplist):
    """
    Return the words of `line` whose lowercase form is not in `stoplist`.

    Arguments
    ---------
    line (list) -- Words of one tweet.
    stoplist -- Collection of lowercase words to drop.
    """
    return [word for word in line if word.lower() not in stoplist]


def sum_tweet(tweet, emotion_dict):
    """
    Sum the emotion scores of the words in tweet (case-insensitive).
    Words not in emotion_dict contribute nothing.

    >>> sum_tweet(['Happy', 'Birthday', 'Brahm!'], {'happy': 1, 'birthday': 1})
    2
    """
    # .get with default 0 means absent words add nothing to the total.
    return sum(emotion_dict.get(word.lower(), 0) for word in tweet)


def build_stop_list(filename="english.stop"):
    """
    Return the list of stop words read from filename.

    Arguments
    ---------
    filename (str) -- Stop-word file, one or more words per line.
        Defaults to "english.stop", the name the original hard-coded.

    Bug fix: the original returned fp.read() — the whole file as one
    string — so the caller's `word.lower() not in stoplist` check did
    SUBSTRING matching (e.g. 'a' matched anywhere in the file). Splitting
    yields an actual list of words. Also uses `with` so the file is closed.
    """
    with open(filename) as fp:
        return fp.read().split()

def main():
    # The stop list holds common words to strip from every tweet
    # before scoring.
    stoplist = build_stop_list()

    # word -> intensity scores for the emotion of interest.
    emotion_dict = make_emotion_dict('emotion-lexicon.txt')

    # Find and report the highest-scoring tweet.
    top_tweet, top_score = read_file('brahms_puns.txt', emotion_dict, stoplist)
    print(top_score, top_tweet)

      
    

Meme Generator

      
from simpleimage import SimpleImage

# Font file used to draw meme caption text.
FONT_FILE = 'impact.ttf'
# Point size of caption text.
SIZE = 50
# Caption text color.
COLOR = 'white'

class MemeGenerator:
    """
    Accumulates caption placements for an image and draws them on demand.

    Captions are queued with add_text and only rendered onto the image
    when render() is called.
    """

    def __init__(self, filename):
        # Queued captions, stored as (text, x, y) tuples.
        self.texts = []
        self.image_filename = filename

    def set_image(self, filename):
        """Point the generator at a different image file."""
        self.image_filename = filename

    def add_text(self, text, x, y):
        """Queue caption `text` to be drawn at pixel position (x, y)."""
        self.texts.append((text, x, y))

    def render(self):
        """Draw every queued caption onto the image and display it."""
        img = SimpleImage(self.image_filename)
        for caption in self.texts:
            text, x, y = caption
            img.create_text(text, x, y, FONT_FILE, color=COLOR, size=SIZE)
        img.show()