#!/usr/bin/env python -i # Requires numpy+scipy for running simulation, and PIL for image generation. import itertools import multiprocessing import os.path import time from PIL import Image from PIL import ImageDraw import cPickle import copy import csv import glob import images2gif from numpy import array import os import random import scipy import operator import scipy.integrate import math # Sets the seed to the current time for Random Number Generator random.seed() # Version of code (designates the folder to deposit data into) VERSION_STRING = 'Paper' # Location of requisite files (initial starting locations of world) WORKING_DIR = "" # Place to deposit finished runs DATA_DIR = ("data_version_" + str(VERSION_STRING) + "/") # Magic Numbers: ANCESTRAL_kMs_GLOBAL = [.1648743, .3405951, .9300492] # kMs of S,R, and P MUTANT_kMs_GLOBAL = [i / 100.0 for i in range(27, 34, 1)] # kMs of Mutant Rs vMax_GLOBAL = .610608 # vMax for all def run_many_simulations(settings_generator=None, number_of_replicates=1, number_of_runs=None): """ Runs multiple simulations concurrently Uses the standard multiprocessing library to manage multiple simulations on multi-core machines. Takes a finite generator as the settings parameter to call run_single_simulation. If a generator is not provided a long mutational sweep type run will be performed. Runs a simulator per processor and will consume all availble processor time. """ if settings_generator is None: AssertionError("Requires a setting generator") gen = cycle_settings_generator(settings_generator, number_of_replicates, number_of_runs) pool_workers = multiprocessing.Pool() for setting in gen: pool_workers.apply_async(run_single_simulation, (setting, )) pool_workers.close() pool_workers.join() def ensure_has_directory(f): """ Called with a file argument to make the directory of a file if needed. """ d = os.path.dirname(f) if not os.path.exists(d): os.makedirs(d) def treatments_generator(alone_intial_layout_path, community_intial_layout_path, number_of_transfers): """ Yields the 5 treatments (Full Restricted, Full Unrestricted, Alone Restricted, Permutation, Shadow) Requires csv files detailing the layout """ # Common Parameters settings_common = { "initial_mutation_freq": 0.0, "initial_mutation_proportion": 0.0, "mutation_rate": 0.01, "mutation_proportion": 0.5, "persistence_cutoff_value": 0.000275, "toxic_cutoff_value": 0.0, "migration_rate": (1.0 / 3), "number_of_transfers": number_of_transfers, "kMs": ANCESTRAL_kMs_GLOBAL + MUTANT_kMs_GLOBAL, "vMaxes": [vMax_GLOBAL] * len(ANCESTRAL_kMs_GLOBAL + MUTANT_kMs_GLOBAL), "count": -1, "record_layout_sequence": False, "permute_world": False, "shadow_world": False, "record_midpoint_transfer": True, } # The 3 treatments (Full Restricted, Full Unrestricted, Alone Restricted) treatment_re_com = { "run_type": "Full_Restricted", "initial_world_file": community_intial_layout_path, "is_restricted": True, "r_alone": False, "permute_world": False, "shadow_world": False, } treatment_un_com = { "run_type": "Full_Unrestricted", "initial_world_file": community_intial_layout_path, "is_restricted": False, "r_alone": False, "permute_world": False, "shadow_world": False, } treatment_re_alone = { "run_type": "Alone_Restricted", "initial_world_file": alone_intial_layout_path, "is_restricted": True, "r_alone": True, "permute_world": False, "shadow_world": False, } treatment_permutation = { "run_type": "Permutation", "initial_world_file": community_intial_layout_path, "is_restricted": True, "r_alone": False, "permute_world": True, "shadow_world": False, } treatment_shadow = { "run_type": "Shadow", "initial_world_file": community_intial_layout_path, "is_restricted": True, "r_alone": True, "permute_world": False, "shadow_world": True, } # Add Common Parameters and Deviations to each treatment, and yield result treatments = [treatment_re_com, treatment_un_com, treatment_re_alone, treatment_permutation, treatment_shadow] for treatment in treatments: treatment.update(settings_common) yield copy.deepcopy(treatment) def cycle_settings_generator(settings_generator, number_of_replicates=None, number_of_runs=None): """ Yields a repeated list from a settings generator Requires a finite settings generator (like the km_sweep_generator) and the desired degree of replication, or failing that, the desired number of runs to generate a repeating iterable of settings. """ # List of unique settings list_of_settings = [settings for settings in settings_generator] # Yield desired number of settings if number_of_replicates is not None: number_of_runs = len(list_of_settings) * number_of_replicates elif number_of_runs is None or number_of_runs < 0: raise AssertionError("Must specific number of runs or replicates.") # Generate settings, each with an identifing count cycle_settings_generator = itertools.cycle(list_of_settings) for count in range(number_of_runs): settings_copy = copy.deepcopy(next(cycle_settings_generator)) settings_copy["count"] = count yield settings_copy def run_single_simulation(settings): """ Runs a simulation given a settings dictionary Parameters: settings = the settings dictionary containing the parameters for the run Also runs shadow world: A shadow run follows a native run, with same migrations but the shadow only has the resistant type (in the same locations as the native), and has independent mutations (initialy and each transfer) Parameters: settings = the settings dictionary containing the parameters for the run 1. Initialize native world 2. Initialize shadow world (copy only non resistant) 3. Migrate/Transfer Native world 4. Perform same migrations (but different mutations) 5. Limit Shadow Abundance Resistant Presence to same as native """ # Input id info to settings settings["time_started"] = time.strftime("%Y-%m-%d--%H-%M-%S") settings["unique_name"] = (settings["run_type"] + "_" + settings["time_started"] + "_" + str(settings["count"])) # Pickle for storage settings["pickle_destination"] = (DATA_DIR + settings["unique_name"] + ".pickle") # Debug help print(settings["unique_name"] + " started") # Create the starting metapopulation from the designated file # Pad the abundances to the needed length metapop = Metapopulation(settings["initial_world_file"], len(settings["kMs"])) if settings["shadow_world"]: shadow = Metapopulation(settings["initial_world_file"], len(settings["kMs"]), r_only=True) # Mutate some of the resistant if called for if (settings["initial_mutation_freq"] > 0.0 and settings["initial_mutation_proportion"] > 0.0): #Mutate initial world for _, _, well in metapop.enumerate(): if random.random() < settings["initial_mutation_freq"]: well.mutate(settings["initial_mutation_proportion"]) for _, _, well in shadow.enumerate(): if random.random() < settings["initial_mutation_freq"]: well.mutate(settings["initial_mutation_proportion"]) # Add starting layout to settings dictionary if settings["shadow_world"]: settings["initial_layout_native"] = copy.deepcopy(metapop) settings["initial_layout"] = copy.deepcopy(shadow) else: settings["initial_layout"] = copy.deepcopy(metapop) # Creates a list for the recording of the metapopulation at each time point if settings["record_layout_sequence"]: if settings["shadow_world"]: layout_sequence_native = [copy.deepcopy(metapop)] layout_sequence = [copy.deepcopy(shadow)] else: layout_sequence = [copy.deepcopy(metapop)] # Update metapopulation for i in range(settings["number_of_transfers"]): if settings["shadow_world"]: metapop, shadow = get_next_layout_shadow(metapop, shadow, settings) else: metapop = get_next_layout(metapop, settings) # Permute if desired if settings["permute_world"]: metapop = permute_resistant_wells(metapop) if settings["shadow_world"]: shadow = permute_resistant_wells(shadow) # If saving, add current time point if settings["record_layout_sequence"]: if settings["shadow_world"]: layout_sequence_native.append(copy.deepcopy(metapop)) layout_sequence.append(copy.deepcopy(shadow)) else: layout_sequence.append(copy.deepcopy(metapop)) if ("record_midpoint_transfer" in settings and settings["record_midpoint_transfer"]): if i == settings["number_of_transfers"] / 2: if settings["shadow_world"]: settings["midpoint_layout_native"] = copy.deepcopy(metapop) settings["midpoint_layout"] = copy.deepcopy(shadow) else: settings["midpoint_layout"] = copy.deepcopy(metapop) # Add ending state to settings if settings["shadow_world"]: settings["end_layout_native"] = metapop settings["end_layout"] = shadow else: settings["end_layout"] = metapop if settings["record_layout_sequence"]: if settings["shadow_world"]: settings["layout_sequence_native"] = layout_sequence_native settings["layout_sequence"] = layout_sequence else: settings["layout_sequence"] = layout_sequence # Save settings dictionary as a 'pickle' to file ensure_has_directory(settings["pickle_destination"]) cPickle.dump(settings, open(settings["pickle_destination"], "wb"), -1) # Debug help print settings["unique_name"], " saved" return settings def permute_resistant_wells(metapop): """ Permutes/shuffles the wells that are resistant only """ def is_r_only(well): "Helper Function, true if only resistant" s, r, p = well.abundances_by_type() return (s == 0.0 and p == 0.0 and r > 0.0) # Copy metapopulation and initial lists for the wells to be permuted metapop = copy.deepcopy(metapop) coordinates = list() wells = list() # Gather resistant only wells for x, y, well in metapop.enumerate(): if is_r_only(well): coordinates.append((x, y)) wells.append(well) # Permute/shuffle wells random.shuffle(wells) # Replace for x, y in coordinates: well = wells.pop() metapop.set(x, y, well) return metapop def get_csv_summary(list_of_pickle_paths=None, file_output=None, data_dir_names=None, use_midpoint_layout=False): """ Generates a csv file of summary data for each run in a list list_of_runs = list of settings files (usually unpickled by unpickle_folder) file_output = name of file to export to (should end with .csv) """ def get_km_fitness(competitor_kMs=None): """ Returns the fitness of each possible type kM relative to resistant ancestor """ if competitor_kMs is None: competitor_kMs = ANCESTRAL_kMs_GLOBAL + MUTANT_kMs_GLOBAL vMaxes = [vMax_GLOBAL] * 2 ancestor_kM = ANCESTRAL_kMs_GLOBAL[1] kM_to_fitness = list() for competitor_kM in competitor_kMs: start_well = Well([1, 1]).dilute() end_well = copy.deepcopy(start_well) end_well.grow([ancestor_kM, competitor_kM], vMaxes) ancestor_i, competitor_i = start_well.abundances ancestor_e, competitor_e = end_well.abundances fitness = (math.log(competitor_e / competitor_i) / math.log(ancestor_e / ancestor_i)) kM_to_fitness.append(fitness) return kM_to_fitness def get_mean_fitness(total_abundances, ordered_rel_fit): """determines the mean relative fitness of a metapopulation""" if len(total_abundances) != len(ordered_rel_fit): raise AssertionError("abundances ") # Only consider resistant type total_abundances = [total_abundances[1]] + total_abundances[3:] ordered_rel_fit = [ordered_rel_fit[1]] + ordered_rel_fit[3:] sum_ab = sum(total_abundances) if sum_ab <= 0.0: return 0.0 proportion_ab = [ab / sum_ab for ab in total_abundances] return sum(map(operator.mul, proportion_ab, ordered_rel_fit)) def get_metapopulation_total_abundance(run, metapop=None): """ Returns a list of total abundance for each possible strain of end_layout. """ def float_equal(a, b): return abs(a - b) < .00001 # Initiate tally if metapop is None: metapop = run["end_layout"] strains_present = run["kMs"] total_abundances = [0] * len(strains_present) # Gather tally for x, y, well in metapop.enumerate(): for i, abundance in enumerate(well.abundances): total_abundances[i] += abundance # Convert tally to include all strains possible_kMs = ANCESTRAL_kMs_GLOBAL + MUTANT_kMs_GLOBAL all_abundances = list() i = 0 for kM in run["kMs"]: while not float_equal(kM, possible_kMs[i]): all_abundances.append(0.0) i += 1 all_abundances.append(total_abundances.pop(0)) i += 1 while len(all_abundances) < len(possible_kMs): all_abundances.append(0.0) return all_abundances if list_of_pickle_paths is None: list_of_pickle_paths = list() if data_dir_names is not None: storage_dir = os.path.dirname(os.path.dirname(DATA_DIR)) for data_dir_name in data_dir_names: search_string = os.path.join(os.path.join(storage_dir, data_dir_name), "*.pickle") list_of_pickle_paths.extend(glob.glob(search_string)) print("added " + search_string) print "Starting csv" csv_writer = csv.writer(open(file_output, "wb"), dialect="excel-tab") possible_kMs = ANCESTRAL_kMs_GLOBAL + MUTANT_kMs_GLOBAL # Columns are all non-collection data + kM abundances header = ['count', 'initial_mutation_freq', 'initial_mutation_proportion', 'initial_world_file', 'is_restricted', 'migration_rate', 'mutation_proportion', 'mutation_rate', 'number_of_transfers', 'permute_world', 'persistence_cutoff_value', 'r_alone', 'run_type', 'shadow_world', 'time_started', 'toxic_cutoff_value', 'unique_name'] players = ["Sen", "Res", "Pro", "M27", "M28", "M29", "M30", "M31", "M32", "M33"] csv_writer.writerow(header + players + ["mean_ending_fitness"]) for pickle_path in list_of_pickle_paths: run = cPickle.load(open(pickle_path, "rb")) row = list() for item in header: row.append(run[item]) if use_midpoint_layout: abundances = get_metapopulation_total_abundance(run, run['midpoint_layout']) else: abundances = get_metapopulation_total_abundance(run, run['end_layout']) if abundances is not None: row.extend(abundances) row.append(get_mean_fitness(abundances, get_km_fitness())) csv_writer.writerow(row) class Metapopulation(object): """ A grid of Wells and the functions to alter them """ def __init__(self, file_path=None, pad_length=None, grid=None, x_length=0, y_length=0, r_only=False): """ Creates a Metapopulation object Arguments are x and y lengths, and a grid (initialized to Nones by default """ # Initialize grid if file_path: self.grid = self._initialize_world(file_path, pad_length, r_only) elif grid: self.grid = grid else: self.grid = [[None for y in range(y_length)] for x in range(x_length)] # Set bounds self.x_bound = len(self.grid) self.y_bound = len(self.grid[0]) # Check for filled world if a layout was provided if file_path or grid: if not self._filled_grid(): raise AssertionError("Grid not completely filled") def _initialize_world(self, init_world_file_path, pad_length, r_only=False): """ Create a starting distribution of players in a world from a file Parameters: init_world_file_path = path to file containing starting pattern, csv pad_length = the number of types of strains allowed r_only = True if the world should only contain resistant """ # Translation map from number/index to Well contents world_file_code = { 1: Well([0.0, 1.0, 0.0]), 2: Well([0.0, 0.0, 1.0 * 0.20413]), 3: Well([1.0, 0.0, 0.0]), 4: Well([.5, 0.0, .5 * 0.20413]), 5: Well([.16, 0.0, .84 * 0.20413]), 6: Well([.05, 0.0, .95 * 0.20413]), 7: Well([.018, 0.0, .982 * 0.20413]), 8: Well([.0062, 0.0, .9938 * 0.20413]), 9: Well([.5, .5, 0.0]), 10: Well([.95, .05, 0.0]), 11: Well([.995, .005, 0.0]) } def make_well_r_only(well): """Removes sensitive and producer from well""" well = copy.deepcopy(well) well.abundances[0] = 0.0 well.abundances[2] = 0.0 return well r_only_code = dict() for index, well in world_file_code.iteritems(): r_only_code[index] = make_well_r_only(well) reader = csv.reader(open(init_world_file_path, "rU"), dialect='excel') # Build grid by translating csv file grid = list() for row in reader: grid_row = list() for element in row: # Add copy of well, padded if r_only: well = r_only_code[int(element)] else: well = world_file_code[int(element)] grid_row.append(well.copy(pad_length)) grid.append(grid_row) return grid def _filled_grid(self): """ Checks that every position is not None """ expected_tally = self.x_bound * self.y_bound for x, y, item in self.enumerate(): if item is not None: expected_tally -= 1 return expected_tally == 0 def get(self, x, y): """Returns the well at (x, y)""" return self.grid[x][y] def set(self, x, y, well): """Sets the position (x, y) to given well""" self.grid[x][y] = well def enumerate(self): """ Iterates the grid by row, then column Returns a tuple of the x and y coordinates and the object """ for y in range(self.y_bound): for x in range(self.x_bound): yield (x, y, self.grid[x][y]) def __repr__(self): return "Metapopulation(grid={0})".format(self.grid) def get_migration(self, x, y, is_restricted, given_destination=True): """Returns the coordinates of a migration event Parameters: x,y = the column,well of the focal well is_restricted = true for restricted migrations (4 wells), false for global migration given_destination = true if focal well is destination of migration, false if source Returns the x,y of the source, and the x,y of the destination """ rand = random.random() new_x = x new_y = y #Pick from 4 neighbors if is_restricted: if rand < .25: new_x = x + 1 elif rand < .5: new_x = x - 1 elif rand < .75: new_y = y + 1 else: new_y = y - 1 #Make sure the new well is in the torodial boundries new_x = (new_x + self.x_bound) % self.x_bound new_y = (new_y + self.y_bound) % self.y_bound else: #Pick from entire world while True: # Loop until have neighbor that is not self new_x = random.randrange(0, self.x_bound) new_y = random.randrange(0, self.y_bound) if x != new_x or y != new_y: break if given_destination: return (new_x, new_y, x, y) return (x, y, new_x, new_y) class Well(object): """ Contains the abundances of the players (SRP + Mutants) and methods """ def __init__(self, given_abundances): """ Initialize well with abundances of types given_abundances = a list of numbers denoting abundance in this order [Sensitive Ancestor, Resistant Ancestor, Producer Ancestor, + any Resistant Mutants] """ self.abundances = given_abundances self.length = len(given_abundances) # Indices of self.abundances that are of the Resistant type self._indices_of_R = [1] + range(3, self.length) def add(self, other): """ Give resulting well after migration with other other = a Well with the same number of abundances """ if self.length != other.length: raise AssertionError("Must have same abundance types to mix") new_abundances = list() for i in range(self.length): new_abundances.append(self.abundances[i] + other.abundances[i]) return Well(new_abundances) def set_abundance_of_resistant(self, value=1): """ Make resistant abundances equal to value, by proportion """ # Check that operation can be done ab_of_r = self.abundance_of_resistant() assert (ab_of_r > 0.0 and value <= 0.0), "abundance need be > 0" # Scale resistant types if value <= 0: for i in self._indices_of_R: self.abundances[i] = 0.0 else: scale = ab_of_r / value for i in self._indices_of_R: self.abundances[i] = self.abundances[i] / scale def persistence_cutoff(self, persistence_cutoff_value=0): """ Removes abundances that are below the persistance cutoff """ for i in range(self.length): if self.abundances[i] < persistence_cutoff_value: self.abundances[i] = 0.0 def toxic_cutoff(self, toxic_cutoff_value=0): """ Kills the sensitive If producer (index 2) is above cutoff_value, kill sensitive (index 0) """ if self.abundances[2] > toxic_cutoff_value: self.abundances[0] = 0.0 def dilute(self, dilution_factor=0.025): """ Return well that has each abundance reduced by dilution. """ abundances = [abundance * dilution_factor for abundance in self.abundances] return Well(abundances) def abundance_of_resistant(self): """ Abundance of resistant type (ancestor + mutant) """ return sum([self.abundances[i] for i in self._indices_of_R]) def abundances_by_type(self): """ Returns the abundances of the three types Returns a tuple of cumulative abundances (SRP order) """ return (self.abundances[0], self.abundance_of_resistant(), self.abundances[2]) def mutate(self, mutation_proportion=1.0): """ Mutate a portion of Resistant class to a random resistant class The mutation_proportion of the total abundance of the Resistant class (Ancestor + Mutant) is converted to a one of the mutant classes with equal probability. """ # Indices of self.abundances that are of the Resistant class indices_of_R = [1] + range(3, self.length) # Total proportion of Well that is R (ancestor + mutant) total = self.abundance_of_resistant() # Chosen type to convert to chosen = random.choice(indices_of_R) # Convert to mutant for i in indices_of_R: self.abundances[i] *= (1 - mutation_proportion) self.abundances[chosen] += mutation_proportion * total def copy(self, pad_length=None): """ Copies well, pading length with 0's if needed """ abundances = copy.deepcopy(self.abundances) if pad_length is not None and pad_length > len(abundances): abundances.extend([0.0] * (pad_length - len(abundances))) return Well(abundances) def __repr__(self): return "Well({0})".format(self.abundances) def grow(self, kMs=ANCESTRAL_kMs_GLOBAL, vMaxes=[vMax_GLOBAL] * 3): """ Returns the strains after 12 hours of growth kMs/vMaxes = the list of kMs,vMaxes (both in the same order as abundances) default is the SRPs values """ if self.length != len(vMaxes) or self.length != len(kMs): raise AssertionError("ODE needs proper vMaxes/kMs lengths") # Convert lists of numbers to scipy arrays a_kMs = array(kMs) a_vMaxes = array(vMaxes) strains_initial = array(self.abundances) # Short curcuit process if empty well if sum(strains_initial) <= 0: return def dx_dy(strains, time): """ODE describing the growth rate of the strains""" a_nutrients = array([1 - scipy.sum(strains)] * self.length) return ((a_vMaxes * a_nutrients) / (a_kMs + a_nutrients)) * strains #time steps 0 and 12 hours, times = array([0, 12]) strains_end = scipy.integrate.odeint(dx_dy, strains_initial, times)[-1] # Yields a new well with the ending contents self.abundances = list(strains_end) def get_next_layout_shadow(native, shadow, settings): """ Returns a native and shadow metapopulation after a transfer Steps: Dilution, Migration, Removal, Growth, Mutation Parameters: metapop = current state of the world settings = parameters for migration and interaction """ #New Worlds next_native = copy.deepcopy(native) next_shadow = copy.deepcopy(shadow) #Dilute for x, y, well in next_native.enumerate(): next_native.set(x, y, well.dilute()) for x, y, well in next_shadow.enumerate(): next_shadow.set(x, y, well.dilute()) #Migrate for x, y, well in next_native.enumerate(): # Check if well is destination of a migration if random.random() < settings["migration_rate"]: # Get migration source and destination migration = native.get_migration(x, y, settings["is_restricted"]) source_x, source_y, dest_x, dest_y = migration # Get resulting mixed well source_well = native.get(source_x, source_y) dest_well = native.get(dest_x, dest_y) mixed_well = dest_well.add(source_well).dilute() # Perform immigration into focal well next_native.set(dest_x, dest_y, mixed_well) # Repeat migration in Shadow source_well = shadow.get(source_x, source_y) dest_well = shadow.get(dest_x, dest_y) mixed_well = dest_well.add(source_well).dilute() next_shadow.set(dest_x, dest_y, mixed_well) #Remove from native only for x, y, well in next_native.enumerate(): well.persistence_cutoff(settings["persistence_cutoff_value"]) well.toxic_cutoff(settings["toxic_cutoff_value"]) #Grow both for x, y, well in next_native.enumerate(): well.grow(settings["kMs"], settings["vMaxes"]) for x, y, well in next_shadow.enumerate(): well.grow(settings["kMs"], settings["vMaxes"]) #Mutate both if (settings["mutation_rate"] > 0.0 and settings["mutation_proportion"] > 0.0): for x, y, well in next_native.enumerate(): if random.random() < settings["mutation_rate"]: well.mutate(settings["mutation_proportion"]) for x, y, well in next_shadow.enumerate(): if random.random() < settings["mutation_rate"]: well.mutate(settings["mutation_proportion"]) # Limit Shadow Abundance for x, y, well in next_native.enumerate(): ab = well.abundance_of_resistant() next_shadow.get(x, y).set_abundance_of_resistant(ab) return (next_native, next_shadow) def get_next_layout(metapop, settings): """ Returns a new metapopulation after a transfer Steps: Dilution, Migration, Removal, Growth, Mutation Parameters: metapop = current state of the world settings = parameters for migration and interaction """ #New World next_metapop = copy.deepcopy(metapop) #Dilute for x, y, well in next_metapop.enumerate(): next_metapop.set(x, y, well.dilute()) #Migrate for x, y, well in next_metapop.enumerate(): # Check if well is destination of a migration if random.random() < settings["migration_rate"]: #Get migration source and destination migration = metapop.get_migration(x, y, settings["is_restricted"]) source_x, source_y, dest_x, dest_y = migration source_well = metapop.get(source_x, source_y) dest_well = metapop.get(dest_x, dest_y) mixed_well = dest_well.add(source_well).dilute() #Perform immigration into focal well next_metapop.set(dest_x, dest_y, mixed_well) #Remove for x, y, well in next_metapop.enumerate(): well.persistence_cutoff(settings["persistence_cutoff_value"]) well.toxic_cutoff(settings["toxic_cutoff_value"]) #Grow for x, y, well in next_metapop.enumerate(): well.grow(settings["kMs"], settings["vMaxes"]) #Mutate if (settings["mutation_rate"] > 0.0 and settings["mutation_proportion"] > 0.0): for x, y, well in next_metapop.enumerate(): if random.random() < settings["mutation_rate"]: well.mutate(settings["mutation_proportion"]) return next_metapop def unpickle_folder(data_dir=DATA_DIR): """ Locates pickled run files from directory and returns a unpickled list """ pickle_files = glob.glob(data_dir + "*.pickle") print "Found Pickled Files: ", len(pickle_files) run_data_total = list() for pickle_file in pickle_files: data = cPickle.load(open(pickle_file, "rb")) run_data_total.append(data) return run_data_total def create_animated_gif(run, file_output=None, scale_of_well=10, color_style="type", create_native=False, duration=.1, loops=0): """ Generate a animated gif for the run (requires that each timepoint is saved) """ assert ("layout_sequence" in run or (create_native and "layout_sequence_native" in run)), "need sequence" if file_output is None: base, extension = os.path.splitext(run["pickle_destination"]) file_output = base + "_" + color_style + ".gif" if create_native: layout_sequence = run["layout_sequence_native"] else: layout_sequence = run["layout_sequence"] images = list() for metapop in layout_sequence: img = create_metapopulation_image(metapop=metapop, file_output=None, scale_of_well=scale_of_well, color_style=color_style, settings=run) images.append(img) # write GIF animation images2gif.writeGif(file_output, images, duration, loops, dither=0) def create_metapopulation_image(metapop, file_output=None, scale_of_well=10, color_style="type", settings=None): """ Generates an image of the metapopulation metapop = the metapopulation from a run file_output = save destination for gif file (*.gif), defaults to screen scale_of_well = the number of pixels along the length of a well """ # Two helper functions that return a color for a given well def get_color_by_type(well, settings=None): """ Returns RGB of colors by well Blue = Sensitive Yellow = Resistant Red = Producer """ # Get relative proportions of each player sensitive, resistant, producer = well.abundances_by_type() scale = (sensitive + resistant + producer) / 255.0 if scale <= 0.0: return (0, 0, 0) # Empty Well, black sensitive = sensitive / scale resistant = resistant / scale producer = producer / scale # Return the proportions in 256 scale return ( int((resistant + producer)), # Red int(resistant), # Green int(sensitive) # Blue ) def get_color_by_type_and_black_mutants(well, settings=None): """ Normal colors, but wells with mutant are black """ colors_srpm = [(50, 50, 255), # Sensitive (255, 255, 0), # Resistant (255, 50, 50), # Producer (0, 0, 0), # Mutant ] if sum(well.abundances[3:]) > 0.0: return colors_srpm[3] # Mutant present srp_abs = well.abundances_by_type() max_index = srp_abs.index(max(srp_abs)) return colors_srpm[max_index] def get_color_by_mutation(well, settings): """ Returns the color of a well by mutation abundance grey-scale color with only resistant shown Brighter means smaller kM """ dim = 127 bright = 255 # If no resistant if well.abundance_of_resistant() <= 0.0: # No resistant, black return (0, 0, 0) kMs = settings["kMs"] abundances = well.abundances kMs_abs = zip(kMs, abundances) # remove sensitive and producers del kMs_abs[0] del kMs_abs[1] if len(kMs) <= 1: # No mutants, ancestor color return (dim, dim, dim) kMs_abs.sort() kMs, abundances = zip(*kMs_abs) # normalize abundances total = sum(abundances) abundances = [ab / total for ab in abundances] # Scale brightness so that smaller kMs are brighter slope = ((dim - bright) / (max(kMs) - min(kMs))) intercept = bright - (slope * min(kMs)) bright_scaling = [(kM * slope + intercept) for kM in kMs] # Tally intensity for well tally = 0 for i in range(len(abundances)): tally += bright_scaling[i] * abundances[i] tally = int(tally) return (tally, tally, tally,) # Ensure that settings are provided if color_style is "mutation" if color_style == "type": get_color = get_color_by_type elif color_style == "mutation": assert "kMs" in settings, "need settings for mut image" get_color = get_color_by_mutation elif color_style == "type+mutant": get_color = get_color_by_type_and_black_mutants else: assert False, " is an improper color_style" # Create a blank image im = Image.new('RGB', (metapop.x_bound * scale_of_well, metapop.y_bound * scale_of_well), (0, 0, 0, 0)) draw = ImageDraw.Draw(im) # Add each well to image for x, y, well in metapop.enumerate(): color = get_color(well, settings) draw.rectangle((x * scale_of_well, y * scale_of_well, x * scale_of_well + scale_of_well, y * scale_of_well + scale_of_well), fill=color) # Output if file_output is not None: ensure_has_directory(file_output) im.save(file_output) return im def small_treatments_generator(): """ Generator for 12x16 runs """ alone_path = WORKING_DIR + "initial_12x8_Alone.csv" full_path = WORKING_DIR + "initial_12x16_Full.csv" number_of_transfers = 36 * 4 small_treatments = treatments_generator(alone_path, full_path, number_of_transfers) for treatment in small_treatments: treatment["run_type"] = "Small_" + treatment["run_type"] yield copy.deepcopy(treatment) def large_treatments_generator(): """ Generator for 100x100 runs """ alone_path = WORKING_DIR + "initial_100x50_Alone.csv" full_path = WORKING_DIR + "initial_100x100_Full.csv" number_of_transfers = 100 * 4 large_treatments = treatments_generator(alone_path, full_path, number_of_transfers) for treatment in large_treatments: treatment["run_type"] = "Large_" + treatment["run_type"] yield copy.deepcopy(treatment) # Causes the execution of the main function if this module is run. # Required for multiprocessing to work if __name__ == "__main__": """ Executes the desired functions Runs top level functions from one place: - run_many_simulations() - create_grid_picture(grid,file_output=None, scale_of_well=5) - get_csv_summary() """ print("Start!") #gen = small_treatments_generator() #run_many_simulations(gen, 10) print("Done!")