diff --git a/.github/workflows/config.yml b/.github/workflows/config.yml index e309d38b2..c1c0c9698 100644 --- a/.github/workflows/config.yml +++ b/.github/workflows/config.yml @@ -58,6 +58,10 @@ jobs: run: | python -m pip install pylint python -m pylint --disable=all --enable=unused-import axelrod/strategies/_strategies.py + - name: Check format + run: | + python -m pip install black + python -m black -l 80 . --check - name: Check that installs run: | python setup.py install diff --git a/.isort.cfg b/.isort.cfg index 46cb98577..c28f73f7c 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -4,4 +4,4 @@ multi_line_output = 3 include_trailing_comma = True force_grid_wrap = 0 combine_as_imports = True -line_length = 88 +line_length = 80 diff --git a/.prepare-commit-msg.py b/.prepare-commit-msg.py index 780f8594b..dac608fd9 100755 --- a/.prepare-commit-msg.py +++ b/.prepare-commit-msg.py @@ -41,7 +41,9 @@ if branch.startswith(issue_prefix): issue_number = re.match("%s(.*)" % issue_prefix, branch).group(1) - print("prepare-commit-msg: Prepending [#%s] to commit message" % issue_number) + print( + "prepare-commit-msg: Prepending [#%s] to commit message" % issue_number + ) with open(commit_msg_filepath, "r+") as f: content = f.read() diff --git a/axelrod/_strategy_utils.py b/axelrod/_strategy_utils.py index 6daca1c81..69d7d3215 100644 --- a/axelrod/_strategy_utils.py +++ b/axelrod/_strategy_utils.py @@ -113,7 +113,7 @@ def simulate_match(player_1, player_2, strategy, rounds=10): def _calculate_scores(p1, p2, game): - """Calculates the scores for two players based their history. + """Calculates the scores for two players based on their history. Parameters ---------- @@ -142,9 +142,9 @@ def look_ahead(player1, player2, game, rounds=10): Parameters ---------- - player_1: Player + player1: Player The player that will look ahead. - player_2: Player + player2: Player The opponent that will be inspected. game: Game The Game object used to score rounds. diff --git a/axelrod/compute_finite_state_machine_memory.py b/axelrod/compute_finite_state_machine_memory.py index b46ad58db..686698c5a 100644 --- a/axelrod/compute_finite_state_machine_memory.py +++ b/axelrod/compute_finite_state_machine_memory.py @@ -113,7 +113,10 @@ def get_accessible_transitions( if trans.state in accessible_states: accessible_transitions[ (trans.state, trans.last_opponent_action) - ] = (trans.next_state, trans.next_action) + ] = ( + trans.next_state, + trans.next_action, + ) return accessible_transitions diff --git a/axelrod/ecosystem.py b/axelrod/ecosystem.py index 4c3bfb907..aa2ad9a93 100644 --- a/axelrod/ecosystem.py +++ b/axelrod/ecosystem.py @@ -33,7 +33,7 @@ def __init__( population: List[int] = None, ) -> None: """Create a new ecosystem. - + Parameters ---------- results: ResultSet @@ -83,7 +83,7 @@ def __init__( def reproduce(self, turns: int): """Reproduce populations according to the payoff matrix. 
- + Parameters ---------- turns: int diff --git a/axelrod/evolvable_player.py b/axelrod/evolvable_player.py index 7dd4073b3..ce32bf0e5 100644 --- a/axelrod/evolvable_player.py +++ b/axelrod/evolvable_player.py @@ -7,6 +7,7 @@ class InsufficientParametersError(Exception): """Error indicating that insufficient parameters were specified to initialize an Evolvable Player.""" + def __init__(self, *args): super().__init__(*args) @@ -49,7 +50,7 @@ def create_new(self, **kwargs): def serialize_parameters(self): """Serialize parameters.""" pickled = dumps(self.init_kwargs) # bytes - s = base64.b64encode(pickled).decode('utf8') # string + s = base64.b64encode(pickled).decode("utf8") # string return s @classmethod diff --git a/axelrod/fingerprint.py b/axelrod/fingerprint.py index 264856a15..8ef34d60b 100644 --- a/axelrod/fingerprint.py +++ b/axelrod/fingerprint.py @@ -42,7 +42,7 @@ def _create_points(step: float, progress_bar: bool = True) -> List[Point]: num = int((1 / step) // 1) + 1 if progress_bar: - p_bar = tqdm.tqdm(total=num**2, desc="Generating points") + p_bar = tqdm.tqdm(total=num ** 2, desc="Generating points") points = [] for x in np.linspace(0, 1, num): @@ -88,8 +88,8 @@ def _create_jossann(point: Point, probe: Any) -> Player: if x + y >= 1: joss_ann = DualTransformer()( - JossAnnTransformer((1 - x, 1 - y))( - probe_class))(**init_kwargs) + JossAnnTransformer((1 - x, 1 - y))(probe_class) + )(**init_kwargs) else: joss_ann = JossAnnTransformer((x, y))(probe_class)(**init_kwargs) return joss_ann @@ -177,7 +177,10 @@ def _generate_data(interactions: dict, points: list, edges: list) -> dict: """ edge_scores = [ np.mean( - [compute_final_score_per_turn(scores)[0] for scores in interactions[edge]] + [ + compute_final_score_per_turn(scores)[0] + for scores in interactions[edge] + ] ) for edge in edges ] @@ -215,7 +218,9 @@ def _reshape_data(data: dict, points: list, size: int) -> np.ndarray: class AshlockFingerprint(object): def __init__( - self, strategy: Union[type, Player], probe: Union[type, Player] = axl.TitForTat + self, + strategy: Union[type, Player], + probe: Union[type, Player] = axl.TitForTat, ) -> None: """ Parameters @@ -277,7 +282,7 @@ def fingerprint( processes: int = None, filename: str = None, progress_bar: bool = True, - seed: int = None + seed: int = None, ) -> dict: """Build and play the spatial tournament. @@ -323,8 +328,11 @@ def fingerprint( self.step = step self.spatial_tournament = axl.Tournament( - tourn_players, turns=turns, repetitions=repetitions, edges=edges, - seed=seed + tourn_players, + turns=turns, + repetitions=repetitions, + edges=edges, + seed=seed, ) self.spatial_tournament.play( build_results=False, @@ -432,7 +440,7 @@ def fingerprint( processes: int = None, filename: str = None, progress_bar: bool = True, - seed: int = None + seed: int = None, ) -> np.array: """Creates a spatial tournament to run the necessary matches to obtain fingerprint data. @@ -479,7 +487,7 @@ def fingerprint( turns=turns, noise=noise, repetitions=repetitions, - seed=seed + seed=seed, ) tournament.play( filename=filename, @@ -516,7 +524,9 @@ def analyse_cooperation_ratio(filename): opponent in each turn. The ith row corresponds to the ith opponent and the jth column the jth turn. 
""" - did_c = np.vectorize(lambda actions: [int(action == "C") for action in actions]) + did_c = np.vectorize( + lambda actions: [int(action == "C") for action in actions] + ) cooperation_rates = {} df = dd.read_csv(filename) @@ -525,7 +535,10 @@ def analyse_cooperation_ratio(filename): df = df[df["Player index"] == 0][["Opponent index", "Actions"]] for _, row in df.iterrows(): - opponent_index, player_history = row["Opponent index"], row["Actions"] + opponent_index, player_history = ( + row["Opponent index"], + row["Actions"], + ) if opponent_index in cooperation_rates: cooperation_rates[opponent_index].append(did_c(player_history)) else: @@ -590,7 +603,8 @@ def plot( if display_names: plt.yticks( - range(len(self.opponents)), [str(player) for player in self.opponents] + range(len(self.opponents)), + [str(player) for player in self.opponents], ) else: plt.yticks([0, len(self.opponents) - 1], [0, 1]) diff --git a/axelrod/game.py b/axelrod/game.py index 1c3278275..180a9f110 100644 --- a/axelrod/game.py +++ b/axelrod/game.py @@ -16,9 +16,11 @@ class Game(object): The numerical score attribute to all combinations of action pairs. """ - def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1) -> None: + def __init__( + self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1 + ) -> None: """Create a new game object. - + Parameters ---------- r: int or float @@ -30,7 +32,12 @@ def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1) -> No p: int or float Score obtained by both player for mutual defection. """ - self.scores = {(C, C): (r, r), (D, D): (p, p), (C, D): (s, t), (D, C): (t, s)} + self.scores = { + (C, C): (r, r), + (D, D): (p, p), + (C, D): (s, t), + (D, C): (t, s), + } def RPST(self) -> Tuple[Score, Score, Score, Score]: """Returns game matrix values in Press and Dyson notation.""" diff --git a/axelrod/graph.py b/axelrod/graph.py index 9f41bde4d..54aaee6e9 100644 --- a/axelrod/graph.py +++ b/axelrod/graph.py @@ -9,12 +9,12 @@ class Graph(object): """Weighted and directed graph class. - + This class is intended for the graph associated to a Markov process, since it gives easy access to the neighbors of a particular state. Vertices can be any hashable Python object. - + Initialize with a list of edges: [[node1, node2, weights], ...] Weights can be omitted for an undirected graph. @@ -22,7 +22,7 @@ class Graph(object): For efficiency, neighbors are cached in dictionaries. Undirected graphs are implemented as directed graphs in which every edge (s, t) has the opposite edge (t, s). 
- + Attributes ---------- directed: Boolean indicating whether the graph is directed out_mapping: a dictionary mapping all heads to dictionaries that map all tails to their edge weights (None means no weight) in_mapping: a dictionary mapping all tails to dictionaries that map all heads to their edge weights (none means no weight) - + Properties ---------- vertices: the set of vertices in the graph @@ -155,8 +155,9 @@ def attached_complete_graphs(length, loops=True, directed=False): for cluster in range(2): for i in range(length): for j in range(i + 1, length): - edges.append(("{}:{}".format(cluster, i), - "{}:{}".format(cluster, j))) + edges.append( + ("{}:{}".format(cluster, i), "{}:{}".format(cluster, j)) + ) # Attach at one node edges.append(("0:0", "1:0")) graph = Graph(directed=directed, edges=edges) diff --git a/axelrod/history.py b/axelrod/history.py index 9c0ff2ef1..aea7b1f64 100644 --- a/axelrod/history.py +++ b/axelrod/history.py @@ -82,7 +82,9 @@ def __eq__(self, other): if isinstance(other, list): return self._plays == other elif isinstance(other, History): - return self._plays == other._plays and self._coplays == other._coplays + return ( + self._plays == other._plays and self._coplays == other._coplays + ) raise TypeError("Cannot compare types.") def __getitem__(self, key): @@ -121,7 +123,9 @@ def __init__(self, memory_depth, plays=None, coplays=None): def flip_plays(self): """Creates a flipped plays history for use with DualTransformer.""" flipped_plays = [action.flip() for action in self._plays] - return self.__class__(self.memory_depth, plays=flipped_plays, coplays=self._coplays) + return self.__class__( + self.memory_depth, plays=flipped_plays, coplays=self._coplays + ) def append(self, play, coplay): """Appends a new (play, coplay) pair and updates metadata for diff --git a/axelrod/interaction_utils.py b/axelrod/interaction_utils.py index 82159f94c..220053ee3 100644 --- a/axelrod/interaction_utils.py +++ b/axelrod/interaction_utils.py @@ -32,7 +32,8 @@ def compute_final_score(interactions, game=None): return None final_score = tuple( - sum([score[player_index] for score in scores]) for player_index in [0, 1] + sum([score[player_index] for score in scores]) + for player_index in [0, 1] ) return final_score diff --git a/axelrod/load_data_.py b/axelrod/load_data_.py index ac29250fd..30e407566 100644 --- a/axelrod/load_data_.py +++ b/axelrod/load_data_.py @@ -56,7 +56,12 @@ def load_pso_tables(filename="pso_gambler.csv", directory="data"): rows = load_file(filename, directory) d = dict() for row in rows: - name, a, b, c, = str(row[0]), int(row[1]), int(row[2]), int(row[3]) + name, a, b, c, = ( + str(row[0]), + int(row[1]), + int(row[2]), + int(row[3]), + ) values = list(map(float, row[4:])) d[(name, int(a), int(b), int(c))] = values return d diff --git a/axelrod/match.py b/axelrod/match.py index 907e85f44..19c83abd9 100644 --- a/axelrod/match.py +++ b/axelrod/match.py @@ -29,7 +29,7 @@ def __init__( noise=0, match_attributes=None, reset=True, - seed=None + seed=None, ): """ Parameters @@ -193,7 +193,8 @@ def play(self): result = [] for _ in range(turns): plays = self.simultaneous_play( - self.players[0], self.players[1], self.noise) + self.players[0], self.players[1], self.noise + ) result.append(plays) if self._cache_update_required: diff --git a/axelrod/match_generator.py b/axelrod/match_generator.py index c0ad188bd..37038e7b5 100644 --- a/axelrod/match_generator.py +++ b/axelrod/match_generator.py @@ -12,7 +12,7 @@ def __init__( prob_end=None, edges=None, match_attributes=None, - seed=None + seed=None,
): """ A class to generate matches. This is used by the Tournament class which diff --git a/axelrod/moran.py b/axelrod/moran.py index 3c9876c40..af235e1d9 100644 --- a/axelrod/moran.py +++ b/axelrod/moran.py @@ -28,7 +28,7 @@ def __init__( fitness_transformation: Callable = None, mutation_method="transition", stop_on_fixation=True, - seed=None + seed=None, ) -> None: """ An agent based Moran process class. In each round, each player plays a @@ -93,7 +93,9 @@ def __init__( if m in ["atomic", "transition"]: self.mutation_method = m else: - raise ValueError("Invalid mutation method {}".format(mutation_method)) + raise ValueError( + "Invalid mutation method {}".format(mutation_method) + ) assert (mutation_rate >= 0) and (mutation_rate <= 1) assert (noise >= 0) and (noise <= 1) mode = mode.lower() @@ -127,7 +129,9 @@ def __init__( d[str(p)] = p mutation_targets = dict() for key in sorted(keys): - mutation_targets[key] = [v for (k, v) in sorted(d.items()) if k != key] + mutation_targets[key] = [ + v for (k, v) in sorted(d.items()) if k != key + ] self.mutation_targets = mutation_targets if interaction_graph is None: @@ -146,14 +150,18 @@ def __init__( self.fitness_transformation = fitness_transformation # Map players to graph vertices self.locations = sorted(interaction_graph.vertices) - self.index = dict(zip(sorted(interaction_graph.vertices), range(len(players)))) + self.index = dict( + zip(sorted(interaction_graph.vertices), range(len(players))) + ) self.fixated = self.fixation_check() def set_players(self) -> None: """Copy the initial players into the first population, setting seeds as needed.""" self.players = [] for player in self.initial_players: - if (self.mutation_method == "atomic") and issubclass(player.__class__, EvolvablePlayer): + if (self.mutation_method == "atomic") and issubclass( + player.__class__, EvolvablePlayer + ): # For reproducibility, we generate random seeds for evolvable players. seed = next(self._bulk_random) new_player = player.create_new(seed=seed) @@ -163,8 +171,9 @@ def set_players(self) -> None: self.players.append(player) self.populations = [self.population_distribution()] - def fitness_proportionate_selection(self, - scores: List, fitness_transformation: Callable = None) -> int: + def fitness_proportionate_selection( + self, scores: List, fitness_transformation: Callable = None + ) -> int: """Randomly selects an individual proportionally to score. Parameters @@ -200,7 +209,9 @@ def mutate(self, index: int) -> Player: if self.mutation_method == "atomic": if not issubclass(self.players[index].__class__, EvolvablePlayer): - raise TypeError("Player is not evolvable. Use a subclass of EvolvablePlayer.") + raise TypeError( + "Player is not evolvable. Use a subclass of EvolvablePlayer." 
+ ) return self.players[index].mutate() # Assuming mutation_method == "transition" @@ -237,7 +248,9 @@ def death(self, index: int = None) -> int: # Select locally # index is not None in this case vertex = self._random.choice( - sorted(self.reproduction_graph.out_vertices(self.locations[index])) + sorted( + self.reproduction_graph.out_vertices(self.locations[index]) + ) ) i = self.index[vertex] return i @@ -370,7 +383,7 @@ def score_all(self) -> List: noise=self.noise, game=self.game, deterministic_cache=self.deterministic_cache, - seed=next(self._bulk_random) + seed=next(self._bulk_random), ) match.play() match_scores = match.final_score_per_turn() @@ -484,8 +497,11 @@ class ApproximateMoranProcess(MoranProcess): """ def __init__( - self, players: List[Player], cached_outcomes: dict, mutation_rate: float = 0, - seed: Optional[int] = None + self, + players: List[Player], + cached_outcomes: dict, + mutation_rate: float = 0, + seed: Optional[int] = None, ) -> None: """ Parameters @@ -503,7 +519,7 @@ def __init__( noise=0, deterministic_cache=None, mutation_rate=mutation_rate, - seed=seed + seed=seed, ) self.cached_outcomes = cached_outcomes @@ -529,7 +545,9 @@ def score_all(self) -> List: scores = [0] * N for i in range(N): for j in range(i + 1, N): - player_names = tuple([str(self.players[i]), str(self.players[j])]) + player_names = tuple( + [str(self.players[i]), str(self.players[j])] + ) cached_score = self._get_scores_from_cache(player_names) scores[i] += cached_score[0] scores[j] += cached_score[1] diff --git a/axelrod/player.py b/axelrod/player.py index 6b7370be2..b5f0ff565 100644 --- a/axelrod/player.py +++ b/axelrod/player.py @@ -59,12 +59,13 @@ def _post_init(self): See here to learn more: https://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/ """ + def __call__(cls, *args, **kwargs): # This calls cls.__new__ and cls.__init__ obj = type.__call__(cls, *args, **kwargs) # Next we do any post init or post transform tasks, like recomputing # classifiers - # Note that subclasses inherit the metaclass, and subclasses my override + # Note that subclasses inherit the metaclass, and subclasses may override # or extend __init__ so it's necessary to do these tasks after all the # __init__'s have run in the case of a post-transform reclassification. 
obj._post_init() @@ -130,7 +131,9 @@ def __eq__(self, other): if self.__repr__() != other.__repr__(): return False - for attribute in set(list(self.__dict__.keys()) + list(other.__dict__.keys())): + for attribute in set( + list(self.__dict__.keys()) + list(other.__dict__.keys()) + ): value = getattr(self, attribute, None) other_value = getattr(other, attribute, None) @@ -148,14 +151,20 @@ def __eq__(self, other): ): # Split the original generator so it is not touched generator, original_value = itertools.tee(value) - other_generator, original_other_value = itertools.tee(other_value) + other_generator, original_other_value = itertools.tee( + other_value + ) if isinstance(value, types.GeneratorType): setattr(self, attribute, (ele for ele in original_value)) - setattr(other, attribute, (ele for ele in original_other_value)) + setattr( + other, attribute, (ele for ele in original_other_value) + ) else: setattr(self, attribute, itertools.cycle(original_value)) - setattr(other, attribute, itertools.cycle(original_other_value)) + setattr( + other, attribute, itertools.cycle(original_other_value) + ) for _ in range(200): try: @@ -190,7 +199,8 @@ def set_seed(self, seed): if seed is None: warnings.warn( "Initializing player with seed from Axelrod module random number generator. " - "Results may not be seed reproducible.") + "Results may not be seed reproducible." + ) self._seed = _module_random.random_seed_int() else: self._seed = seed @@ -201,7 +211,9 @@ def __repr__(self): Appends the `__init__` parameters to the strategy's name.""" name = self.name prefix = ": " - gen = (value for value in self.init_kwargs.values() if value is not None) + gen = ( + value for value in self.init_kwargs.values() if value is not None + ) for value in gen: try: if issubclass(value, Player): diff --git a/axelrod/plot.py b/axelrod/plot.py index f56cc9f4f..feacae984 100644 --- a/axelrod/plot.py +++ b/axelrod/plot.py @@ -97,7 +97,9 @@ def _winplot_dataset(self): # Sort wins by median wins = self.result_set.wins medians = map(median, wins) - medians = sorted([(m, i) for (i, m) in enumerate(medians)], reverse=True) + medians = sorted( + [(m, i) for (i, m) in enumerate(medians)], reverse=True + ) # Reorder and grab names wins = [wins[x[-1]] for x in medians] ranked_names = [str(self.players[x[-1]]) for x in medians] @@ -324,7 +326,9 @@ def save_all_plots( pbar = tqdm.tqdm(total=total, desc="Obtaining plots") for method, name in plots: - f = getattr(self, method)(title="{} - {}".format(title_prefix, name)) + f = getattr(self, method)( + title="{} - {}".format(title_prefix, name) + ) path = pathlib.Path("{}_{}.{}".format(prefix, method, filetype)) f.savefig(axl_filename(path)) plt.close(f) diff --git a/axelrod/random_.py b/axelrod/random_.py index 23949c93e..73289ff8f 100644 --- a/axelrod/random_.py +++ b/axelrod/random_.py @@ -11,6 +11,7 @@ class RandomGenerator(object): """Container around a random number generator. 
Enables reproducibility of player behavior, matches, and tournaments.""" + def __init__(self, seed: Optional[int] = None): # _random is the internal object that generators random values self._random = RandomState() @@ -28,7 +29,7 @@ def randint(self, *args, **kwargs): return self._random.randint(*args, **kwargs) def random_seed_int(self) -> int: - return self.randint(low=0, high=2**32-1, dtype="uint64") + return self.randint(low=0, high=2 ** 32 - 1, dtype="uint64") def choice(self, *args, **kwargs): return self._random.choice(*args, **kwargs) @@ -118,7 +119,8 @@ class BulkRandomGenerator(object): """Bulk generator of random integers for tournament seeding and reproducibility. Bulk generation of random values is more efficient. Use this class like a generator.""" - def __init__(self, seed=None, batch_size:int = 1000): + + def __init__(self, seed=None, batch_size: int = 1000): self._random_generator = RandomState() self._random_generator.seed(seed) self._ints = None @@ -130,10 +132,8 @@ def _fill_ints(self): # Generate more random values. Store as a list since generators # cannot be pickled. self._ints = self._random_generator.randint( - low=0, - high=2**32 - 1, - size=self._batch_size, - dtype="uint64") + low=0, high=2 ** 32 - 1, size=self._batch_size, dtype="uint64" + ) self._index = 0 def __next__(self): diff --git a/axelrod/result_set.py b/axelrod/result_set.py index 65353cc6d..5f1fc8d8a 100644 --- a/axelrod/result_set.py +++ b/axelrod/result_set.py @@ -117,9 +117,15 @@ def _reshape_out( alternative=0, ) - self.wins = self._reshape_two_dim_list(sum_per_player_repetition_df["Win"]) - self.scores = self._reshape_two_dim_list(sum_per_player_repetition_df["Score"]) - self.normalised_scores = self._reshape_two_dim_list(normalised_scores_series) + self.wins = self._reshape_two_dim_list( + sum_per_player_repetition_df["Win"] + ) + self.scores = self._reshape_two_dim_list( + sum_per_player_repetition_df["Score"] + ) + self.normalised_scores = self._reshape_two_dim_list( + normalised_scores_series + ) self.cooperation = self._build_cooperation( sum_per_player_opponent_df["Cooperation count"] @@ -132,7 +138,9 @@ def _reshape_out( self.state_distribution = self._build_state_distribution( sum_per_player_opponent_df[columns] ) - self.normalised_state_distribution = self._build_normalised_state_distribution() + self.normalised_state_distribution = ( + self._build_normalised_state_distribution() + ) columns = [ "CC to C count", @@ -144,8 +152,10 @@ def _reshape_out( "DD to C count", "DD to D count", ] - self.state_to_action_distribution = self._build_state_to_action_distribution( - sum_per_player_opponent_df[columns] + self.state_to_action_distribution = ( + self._build_state_to_action_distribution( + sum_per_player_opponent_df[columns] + ) ) self.normalised_state_to_action_distribution = ( self._build_normalised_state_to_action_distribution() @@ -166,7 +176,9 @@ def _reshape_out( self.ranked_names = self._build_ranked_names() self.payoff_matrix = self._build_summary_matrix(self.payoffs) - self.payoff_stddevs = self._build_summary_matrix(self.payoffs, func=np.std) + self.payoff_stddevs = self._build_summary_matrix( + self.payoffs, func=np.std + ) self.payoff_diffs_means = self._build_payoff_diffs_means() self.cooperating_rating = self._build_cooperating_rating() @@ -266,7 +278,9 @@ def _build_good_partner_matrix(self, good_partner_series): # interactions. 
row.append(0) else: - row.append(good_partner_dict.get((player_index, opponent_index), 0)) + row.append( + good_partner_dict.get((player_index, opponent_index), 0) + ) good_partner_matrix.append(row) return good_partner_matrix @@ -334,13 +348,17 @@ def _build_normalised_state_distribution(self): for counter in player: total = sum(counter.values()) counters.append( - Counter({key: value / total for key, value in counter.items()}) + Counter( + {key: value / total for key, value in counter.items()} + ) ) normalised_state_distribution.append(counters) return normalised_state_distribution @update_progress_bar - def _build_state_to_action_distribution(self, state_to_action_distribution_series): + def _build_state_to_action_distribution( + self, state_to_action_distribution_series + ): state_to_action_key_map = { "CC to C count": ((C, C), C), "CC to D count": ((C, C), D), @@ -396,8 +414,12 @@ def _build_normalised_state_to_action_distribution(self): return normalised_state_to_action_distribution @update_progress_bar - def _build_initial_cooperation_count(self, initial_cooperation_count_series): - initial_cooperation_count_dict = initial_cooperation_count_series.to_dict() + def _build_initial_cooperation_count( + self, initial_cooperation_count_series + ): + initial_cooperation_count_dict = ( + initial_cooperation_count_series.to_dict() + ) initial_cooperation_count = [ initial_cooperation_count_dict.get(player_index, 0) for player_index in range(self.num_players) @@ -427,7 +449,8 @@ def _build_initial_cooperation_rate(self, interactions_series): warnings.simplefilter("ignore") initial_cooperation_rate = list( np.nan_to_num( - np.array(self.initial_cooperation_count) / interactions_array + np.array(self.initial_cooperation_count) + / interactions_array ) ) return initial_cooperation_rate @@ -453,7 +476,9 @@ def _build_eigenmoses_rating(self): The eigenmoses rating as defined in: http://www.scottaaronson.com/morality.pdf """ - eigenvector, eigenvalue = eigen.principal_eigenvector(self.vengeful_cooperation) + eigenvector, eigenvalue = eigen.principal_eigenvector( + self.vengeful_cooperation + ) return eigenvector.tolist() @@ -577,7 +602,9 @@ def _build_tasks(self, df): ] sum_per_player_opponent_task = df.groupby(groups)[columns].sum() - ignore_self_interactions_task = df["Player index"] != df["Opponent index"] + ignore_self_interactions_task = ( + df["Player index"] != df["Opponent index"] + ) adf = df[ignore_self_interactions_task] groups = ["Player index", "Repetition"] @@ -591,7 +618,9 @@ def _build_tasks(self, df): groups = ["Player index"] column = "Initial cooperation" initial_cooperation_count_task = adf.groupby(groups)[column].sum() - interactions_count_task = adf.groupby("Player index")["Player index"].count() + interactions_count_task = adf.groupby("Player index")[ + "Player index" + ].count() return ( mean_per_reps_player_opponent_task, @@ -642,8 +671,12 @@ def list_equal_with_nans(v1: List[float], v2: List[float]) -> bool: self.cooperating_rating == other.cooperating_rating, self.good_partner_matrix == other.good_partner_matrix, self.good_partner_rating == other.good_partner_rating, - list_equal_with_nans(self.eigenmoses_rating, other.eigenmoses_rating), - list_equal_with_nans(self.eigenjesus_rating, other.eigenjesus_rating), + list_equal_with_nans( + self.eigenmoses_rating, other.eigenmoses_rating + ), + list_equal_with_nans( + self.eigenjesus_rating, other.eigenjesus_rating + ), ] ) @@ -713,7 +746,9 @@ def summarise(self): rates = [] for state in states: counts = [ - counter[(state, 
C)] for counter in player if counter[(state, C)] > 0 + counter[(state, C)] + for counter in player + if counter[(state, C)] > 0 ] if len(counts) > 0: @@ -736,7 +771,9 @@ def summarise(self): summary_data = [] for rank, i in enumerate(self.ranking): - data = list(summary_measures[i]) + state_prob[i] + state_to_C_prob[i] + data = ( + list(summary_measures[i]) + state_prob[i] + state_to_C_prob[i] + ) summary_data.append(self.player(rank, *data)) return summary_data diff --git a/axelrod/strategies/__init__.py b/axelrod/strategies/__init__.py index a7635596e..537ee89bc 100644 --- a/axelrod/strategies/__init__.py +++ b/axelrod/strategies/__init__.py @@ -90,7 +90,9 @@ short_run_time_strategies = [ s for s in strategies if not Classifiers["long_run_time"](s()) ] -cheating_strategies = [s for s in all_strategies if not Classifiers.obey_axelrod(s())] +cheating_strategies = [ + s for s in all_strategies if not Classifiers.obey_axelrod(s()) +] ordinary_strategies = strategies # This is a legacy and will be removed diff --git a/axelrod/strategies/_filters.py b/axelrod/strategies/_filters.py index c9c199bf7..2dc7a88b6 100644 --- a/axelrod/strategies/_filters.py +++ b/axelrod/strategies/_filters.py @@ -155,15 +155,24 @@ class ExampleStrategy(Player): ), "manipulates_state": FilterFunction( function=passes_operator_filter, - kwargs={"classifier_key": "manipulates_state", "operator": operator.eq}, + kwargs={ + "classifier_key": "manipulates_state", + "operator": operator.eq, + }, ), "manipulates_source": FilterFunction( function=passes_operator_filter, - kwargs={"classifier_key": "manipulates_source", "operator": operator.eq}, + kwargs={ + "classifier_key": "manipulates_source", + "operator": operator.eq, + }, ), "inspects_source": FilterFunction( function=passes_operator_filter, - kwargs={"classifier_key": "inspects_source", "operator": operator.eq}, + kwargs={ + "classifier_key": "inspects_source", + "operator": operator.eq, + }, ), "memory_depth": FilterFunction( function=passes_operator_filter, @@ -178,7 +187,8 @@ class ExampleStrategy(Player): kwargs={"classifier_key": "memory_depth", "operator": operator.le}, ), "makes_use_of": FilterFunction( - function=passes_in_list_filter, kwargs={"classifier_key": "makes_use_of"} + function=passes_in_list_filter, + kwargs={"classifier_key": "makes_use_of"}, ), } diff --git a/axelrod/strategies/adaptive.py b/axelrod/strategies/adaptive.py index 02e930162..edc5f6a98 100644 --- a/axelrod/strategies/adaptive.py +++ b/axelrod/strategies/adaptive.py @@ -7,7 +7,7 @@ class Adaptive(Player): - """Start with a specific sequence of C and D, then play the strategy that + """Start with a specific sequence of C and D, then play the action that has worked best, recalculated each turn. 
Names: @@ -42,6 +42,8 @@ def score_last_round(self, opponent: Player): self.scores[last_round[0]] += scores[0] def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" + # Update scores from the last play self.score_last_round(opponent) # Begin by playing the sequence C,C,C,C,C,C,D,D,D,D,D diff --git a/axelrod/strategies/adaptor.py b/axelrod/strategies/adaptor.py index f8c550f61..f949f0afe 100644 --- a/axelrod/strategies/adaptor.py +++ b/axelrod/strategies/adaptor.py @@ -36,14 +36,16 @@ class AbstractAdaptor(Player): "manipulates_state": False, } - def __init__(self, delta: Dict[Tuple[Action, Action], float], - perr: float = 0.01) -> None: + def __init__( + self, delta: Dict[Tuple[Action, Action], float], perr: float = 0.01 + ) -> None: super().__init__() self.perr = perr self.delta = delta - self.s = 0. + self.s = 0.0 def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if self.history: # Update internal state from the last play last_round = (self.history[-1], opponent.history[-1]) @@ -51,7 +53,8 @@ def strategy(self, opponent: Player) -> Action: # Compute probability of Cooperation p = self.perr + (1.0 - 2 * self.perr) * ( - heaviside(self.s + 1, 1) - heaviside(self.s - 1, 1)) + heaviside(self.s + 1, 1) - heaviside(self.s - 1, 1) + ) # Draw action action = self._random.random_choice(p) return action @@ -71,10 +74,10 @@ class AdaptorBrief(AbstractAdaptor): def __init__(self) -> None: delta = { - (C, C): 0., # R + (C, C): 0.0, # R (C, D): -1.001505, # S - (D, C): 0.992107, # T - (D, D): -0.638734 # P + (D, C): 0.992107, # T + (D, D): -0.638734, # P } super().__init__(delta=delta) @@ -93,9 +96,9 @@ class AdaptorLong(AbstractAdaptor): def __init__(self) -> None: delta = { - (C, C): 0., # R + (C, C): 0.0, # R (C, D): 1.888159, # S (D, C): 1.858883, # T - (D, D): -0.995703 # P + (D, D): -0.995703, # P } super().__init__(delta=delta) diff --git a/axelrod/strategies/alternator.py b/axelrod/strategies/alternator.py index 4147f1af7..244ca4a07 100644 --- a/axelrod/strategies/alternator.py +++ b/axelrod/strategies/alternator.py @@ -25,6 +25,7 @@ class Alternator(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(self.history) == 0: return C if self.history[-1] == C: diff --git a/axelrod/strategies/ann.py b/axelrod/strategies/ann.py index 74bd1ab7e..d2e368bcc 100644 --- a/axelrod/strategies/ann.py +++ b/axelrod/strategies/ann.py @@ -113,7 +113,10 @@ def compute_features(player: Player, opponent: Player) -> List[int]: def activate( - bias: List[float], hidden: List[float], output: List[float], inputs: List[int] + bias: List[float], + hidden: List[float], + output: List[float], + inputs: List[int], ) -> float: """ Compute the output of the neural network: @@ -193,8 +196,7 @@ class ANN(Player): } def __init__( - self, num_features: int, num_hidden: int, - weights: List[float] = None + self, num_features: int, num_hidden: int, weights: List[float] = None ) -> None: Player.__init__(self) self.num_features = num_features @@ -209,6 +211,7 @@ def _process_weights(self, weights, num_features, num_hidden): self.bias_weights = np.array(bias) def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" features = compute_features(self, opponent) output = activate( self.bias_weights, @@ -224,42 +227,68 @@ def strategy(self, opponent: Player) -> Action: class 
EvolvableANN(ANN, EvolvablePlayer): """Evolvable version of ANN.""" + name = "EvolvableANN" def __init__( - self, num_features: int, num_hidden: int, + self, + num_features: int, + num_hidden: int, weights: List[float] = None, mutation_probability: float = None, mutation_distance: int = 5, - seed: int = None + seed: int = None, ) -> None: EvolvablePlayer.__init__(self, seed=seed) - num_features, num_hidden, weights, mutation_probability = self._normalize_parameters( - num_features, num_hidden, weights, mutation_probability) - ANN.__init__(self, - num_features=num_features, - num_hidden=num_hidden, - weights=weights) + ( + num_features, + num_hidden, + weights, + mutation_probability, + ) = self._normalize_parameters( + num_features, num_hidden, weights, mutation_probability + ) + ANN.__init__( + self, + num_features=num_features, + num_hidden=num_hidden, + weights=weights, + ) self.mutation_probability = mutation_probability self.mutation_distance = mutation_distance self.overwrite_init_kwargs( num_features=num_features, num_hidden=num_hidden, weights=weights, - mutation_probability=mutation_probability) + mutation_probability=mutation_probability, + ) - def _normalize_parameters(self, num_features=None, num_hidden=None, weights=None, mutation_probability=None): + def _normalize_parameters( + self, + num_features=None, + num_hidden=None, + weights=None, + mutation_probability=None, + ): if not (num_features and num_hidden): - raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableANN") + raise InsufficientParametersError( + "Insufficient Parameters to instantiate EvolvableANN" + ) size = num_weights(num_features, num_hidden) if not weights: weights = [self._random.uniform(-1, 1) for _ in range(size)] if mutation_probability is None: - mutation_probability = 10. / size + mutation_probability = 10.0 / size return num_features, num_hidden, weights, mutation_probability - def mutate_weights(self, weights, num_features, num_hidden, mutation_probability, - mutation_distance): + def mutate_weights( + self, + weights, + num_features, + num_hidden, + mutation_probability, + mutation_distance, + ): size = num_weights(num_features, num_hidden) randoms = self._random.random(size) for i, r in enumerate(randoms): @@ -270,13 +299,19 @@ def mutate_weights(self, weights, num_features, num_hidden, mutation_probability def mutate(self): weights = self.mutate_weights( - self.weights, self.num_features, self.num_hidden, - self.mutation_probability, self.mutation_distance) + self.weights, + self.num_features, + self.num_hidden, + self.mutation_probability, + self.mutation_distance, + ) return self.create_new(weights=weights) def crossover(self, other): if other.__class__ != self.__class__: - raise TypeError("Crossover must be between the same player classes.") + raise TypeError( + "Crossover must be between the same player classes." 
+ ) weights = crossover_lists(self.weights, other.weights, self._random) return self.create_new(weights=weights) @@ -299,9 +334,8 @@ class EvolvedANN(ANN): def __init__(self) -> None: num_features, num_hidden, weights = nn_weights["Evolved ANN"] super().__init__( - num_features=num_features, - num_hidden=num_hidden, - weights=weights) + num_features=num_features, num_hidden=num_hidden, weights=weights + ) class EvolvedANN5(ANN): @@ -322,9 +356,8 @@ class EvolvedANN5(ANN): def __init__(self) -> None: num_features, num_hidden, weights = nn_weights["Evolved ANN 5"] super().__init__( - num_features=num_features, - num_hidden=num_hidden, - weights=weights) + num_features=num_features, num_hidden=num_hidden, weights=weights + ) class EvolvedANNNoise05(ANN): @@ -345,6 +378,5 @@ class EvolvedANNNoise05(ANN): def __init__(self) -> None: num_features, num_hidden, weights = nn_weights["Evolved ANN 5 Noise 05"] super().__init__( - num_features=num_features, - num_hidden=num_hidden, - weights=weights) + num_features=num_features, num_hidden=num_hidden, weights=weights + ) diff --git a/axelrod/strategies/apavlov.py b/axelrod/strategies/apavlov.py index 3e3b17525..6497d2b2d 100644 --- a/axelrod/strategies/apavlov.py +++ b/axelrod/strategies/apavlov.py @@ -33,6 +33,7 @@ def __init__(self) -> None: self.opponent_class = None # type: Optional[str] def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # TFT for six rounds if len(self.history) < 6: return D if opponent.history[-1:] == [D] else C @@ -96,6 +97,7 @@ def __init__(self) -> None: self.opponent_class = None # type: Optional[str] def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # TFT for six rounds if len(self.history) < 6: return D if opponent.history[-1:] == [D] else C diff --git a/axelrod/strategies/appeaser.py b/axelrod/strategies/appeaser.py index fd7b224f0..69916fb2c 100644 --- a/axelrod/strategies/appeaser.py +++ b/axelrod/strategies/appeaser.py @@ -26,6 +26,7 @@ class Appeaser(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not len(opponent.history): return C else: diff --git a/axelrod/strategies/averagecopier.py b/axelrod/strategies/averagecopier.py index 131a9cca0..cde4a0ab1 100644 --- a/axelrod/strategies/averagecopier.py +++ b/axelrod/strategies/averagecopier.py @@ -4,9 +4,14 @@ C, D = Action.C, Action.D +def calculate_cooperation_ratio(player: Player) -> float: + """Calculates the cooperation ratio of the given player.""" + return player.cooperations / len(player.history) + + class AverageCopier(Player): """ - The player will cooperate with probability p if the opponent's cooperation + The player will cooperate with probability p where the opponent's cooperation ratio is p. Starts with random decision. Names: @@ -25,11 +30,11 @@ class AverageCopier(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(opponent.history) == 0: # Randomly picks a strategy (not affected by history).
return self._random.random_choice(0.5) - p = opponent.cooperations / len(opponent.history) - return self._random.random_choice(p) + return self._random.random_choice(calculate_cooperation_ratio(opponent)) class NiceAverageCopier(Player): @@ -52,7 +57,7 @@ class NiceAverageCopier(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(opponent.history) == 0: return C - p = opponent.cooperations / len(opponent.history) - return self._random.random_choice(p) + return self._random.random_choice(calculate_cooperation_ratio(opponent)) diff --git a/axelrod/strategies/axelrod_first.py b/axelrod/strategies/axelrod_first.py index 0cc16d4e6..b3065e25a 100644 --- a/axelrod/strategies/axelrod_first.py +++ b/axelrod/strategies/axelrod_first.py @@ -66,7 +66,7 @@ def strategy(self, opponent: Player) -> Action: opponent ever plays D.""" if len(self.history) < self._rounds_to_cooperate: return C - if opponent.defections > 0: # Implement Grudger + if opponent.defections > 0: #  Implement Grudger return D return C @@ -245,6 +245,7 @@ def __init__(self) -> None: self.number_opponent_cooperations_in_response_to_D = 0 def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" round_number = len(self.history) + 1 if round_number == 1: @@ -262,12 +263,14 @@ def strategy(self, opponent: Player) -> Action: # Adding 1 to cooperations for assumption that first opponent move # being a response to a cooperation. See docstring for more # information. - alpha = (self.number_opponent_cooperations_in_response_to_C / - (self.cooperations + 1)) + alpha = self.number_opponent_cooperations_in_response_to_C / ( + self.cooperations + 1 + ) # Adding 2 to defections on the assumption that the first two # moves are defections, which may not be true in a noisy match - beta = (self.number_opponent_cooperations_in_response_to_D / - max(self.defections, 2)) + beta = self.number_opponent_cooperations_in_response_to_D / max( + self.defections, 2 + ) R, P, S, T = self.match_attributes["game"].RPST() expected_value_of_cooperating = alpha * R + (1 - alpha) * S @@ -345,6 +348,7 @@ def _cooperation_probability(self) -> float: return max(self._start_coop_prob + slope * rounds, self._end_coop_prob) def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not opponent.history: return C if opponent.history[-1] == D: @@ -430,25 +434,34 @@ def strategy(self, opponent: Player) -> Action: # Check if opponent plays randomly, if so, defect for the rest of the game p_value = chisquare([opponent.cooperations, opponent.defections]).pvalue - self.opponent_is_random = (p_value >= self.alpha) or self.opponent_is_random + self.opponent_is_random = ( + p_value >= self.alpha + ) or self.opponent_is_random if self.opponent_is_random: return D - if all( - opponent.history[i] == self.history[i - 1] - for i in range(1, len(self.history)) - ) or opponent.history == self.history: + if ( + all( + opponent.history[i] == self.history[i - 1] + for i in range(1, len(self.history)) + ) + or opponent.history == self.history + ): # Check if opponent plays Tit for Tat or a clone of itself. 
if opponent.history[-1] == D: return D return C if self.next_random_defection_turn is None: - self.next_random_defection_turn = self._random.randint(5, 15) + len(self.history) + self.next_random_defection_turn = self._random.randint(5, 15) + len( + self.history + ) if len(self.history) == self.next_random_defection_turn: # resample the next defection turn - self.next_random_defection_turn = self._random.randint(5, 15) + len(self.history) + self.next_random_defection_turn = self._random.randint(5, 15) + len( + self.history + ) return D return C @@ -480,6 +493,7 @@ class FirstByGrofman(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(self.history) == 0 or self.history[-1] == opponent.history[-1]: return C return self._random.random_choice(2 / 7) @@ -580,7 +594,27 @@ class FirstByNydegger(Player): } def __init__(self) -> None: - self.As = [1, 6, 7, 17, 22, 23, 26, 29, 30, 31, 33, 38, 39, 45, 49, 54, 55, 58, 61] + self.As = [ + 1, + 6, + 7, + 17, + 22, + 23, + 26, + 29, + 30, + 31, + 33, + 38, + 39, + 45, + 49, + 54, + 55, + 58, + 61, + ] self.score_map = {(C, C): 0, (C, D): 2, (D, C): 1, (D, D): 3} super().__init__() @@ -599,6 +633,7 @@ def score_history( return a def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(self.history) == 0: return C if len(self.history) == 1: @@ -610,7 +645,9 @@ def strategy(self, opponent: Player) -> Action: else: # TFT return D if opponent.history[-1] == D else C - A = self.score_history(self.history[-3:], opponent.history[-3:], self.score_map) + A = self.score_history( + self.history[-3:], opponent.history[-3:], self.score_map + ) if A in self.As: return D return C @@ -680,6 +717,7 @@ def _decrease_retaliation_counter(self): self.is_retaliating = False def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not opponent.history: return C @@ -740,6 +778,7 @@ def __init__(self) -> None: self._rounds_to_cooperate = 11 def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(self.history) < self._rounds_to_cooperate: return C rounds = self._rounds_to_cooperate - 1 @@ -784,6 +823,7 @@ class FirstByAnonymous(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" r = self._random.uniform(3, 7) / 10 return self._random.random_choice(r) @@ -839,6 +879,7 @@ def __init__(self, alpha: float = 0.05) -> None: self.opponent_is_random = False def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" round_number = len(self.history) + 1 # First 4 moves @@ -849,7 +890,9 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] if round_number % 15 == 0: - p_value = chisquare([opponent.cooperations, opponent.defections]).pvalue + p_value = chisquare( + [opponent.cooperations, opponent.defections] + ).pvalue self.opponent_is_random = p_value >= self.alpha if self.opponent_is_random: @@ -949,6 +992,7 @@ def _score_last_round(self, opponent: Player): self.opponent_score += scores[1] def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not opponent.history: return C @@ -982,8 +1026,10 @@ def strategy(self, opponent: Player) -> Action: std_deviation = (N ** (1 / 2)) / 2 lower = N / 2 - 3 * 
std_deviation upper = N / 2 + 3 * std_deviation - if (self.remembered_number_of_opponent_defectioons <= lower or - self.remembered_number_of_opponent_defectioons >= upper): + if ( + self.remembered_number_of_opponent_defectioons <= lower + or self.remembered_number_of_opponent_defectioons >= upper + ): # Opponent deserves a fresh start self.last_fresh_start = current_round self._fresh_start() diff --git a/axelrod/strategies/axelrod_second.py b/axelrod/strategies/axelrod_second.py index 3ab54f40a..a328ffeb6 100644 --- a/axelrod/strategies/axelrod_second.py +++ b/axelrod/strategies/axelrod_second.py @@ -41,6 +41,7 @@ class SecondByChampion(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" current_round = len(self.history) # Cooperate for the first 10 turns if current_round == 0: @@ -84,6 +85,7 @@ class SecondByEatherley(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # Cooperate on the first move if not len(opponent.history): return C @@ -127,6 +129,7 @@ def __init__(self) -> None: self.is_TFT = False def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # Defect on the first move if not opponent.history: return D @@ -180,6 +183,7 @@ def __init__(self) -> None: self.patsy = True def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # Defect on the first move if not self.history: return D @@ -327,8 +331,12 @@ def __init__(self): self.opponent_consecutive_defections = 0 # equal to S variable self.one_turn_after_good_defection_ratio = 5 # equal to AD variable self.two_turns_after_good_defection_ratio = 0 # equal to NO variable - self.one_turn_after_good_defection_ratio_count = 1 # equal to AK variable - self.two_turns_after_good_defection_ratio_count = 1 # equal to NK variable + self.one_turn_after_good_defection_ratio_count = ( + 1 # equal to AK variable + ) + self.two_turns_after_good_defection_ratio_count = ( + 1 # equal to NK variable + ) # All above variables correspond to those in original Fotran Code self.dict = {C: 0, D: 1} @@ -353,7 +361,12 @@ def update_state(self, opponent): ) + (3 - (3 * self.dict[opponent.history[-1]])) + (2 * self.dict[self.history[-1]]) - - ((self.dict[opponent.history[-1]] * self.dict[self.history[-1]])) + - ( + ( + self.dict[opponent.history[-1]] + * self.dict[self.history[-1]] + ) + ) ) / (self.two_turns_after_good_defection_ratio_count + 1) self.two_turns_after_good_defection_ratio_count += 1 elif self.num_turns_after_good_defection == 1: @@ -365,11 +378,15 @@ def update_state(self, opponent): ) + (3 - (3 * self.dict[opponent.history[-1]])) + (2 * self.dict[self.history[-1]]) - - (self.dict[opponent.history[-1]] * self.dict[self.history[-1]]) + - ( + self.dict[opponent.history[-1]] + * self.dict[self.history[-1]] + ) ) / (self.one_turn_after_good_defection_ratio_count + 1) self.one_turn_after_good_defection_ratio_count += 1 def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not self.history: return C @@ -402,7 +419,10 @@ def strategy(self, opponent: Player) -> Action: return D if (current_score[0] / ((len(self.history)) + 1)) >= 1.75: probability = ( - (0.25 + ((opponent.cooperations + 1) / ((len(self.history)) + 1))) + ( + 0.25 + + ((opponent.cooperations + 1) / ((len(self.history)) + 1)) + ) - 
(self.opponent_consecutive_defections * 0.25) + ((current_score[0] - current_score[1]) / 100) + (4 / ((len(self.history)) + 1)) @@ -452,6 +472,7 @@ class SecondByGrofman(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # Cooperate on the first two moves if len(self.history) < 2: return C @@ -516,9 +537,15 @@ class SecondByKluepfel(Player): def __init__(self): super().__init__() - self.cd_counts, self.dd_counts, self.dc_counts, self.cc_counts = 0, 0, 0, 0 + self.cd_counts, self.dd_counts, self.dc_counts, self.cc_counts = ( + 0, + 0, + 0, + 0, + ) def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # First update the response matrix. if len(self.history) >= 2: if self.history[-2] == D: @@ -534,7 +561,9 @@ def strategy(self, opponent: Player) -> Action: # Check for randomness if len(self.history) > 26: - if self.cd_counts >= (self.cd_counts + self.dd_counts) / 2 - 0.75 * np.sqrt( + if self.cd_counts >= ( + self.cd_counts + self.dd_counts + ) / 2 - 0.75 * np.sqrt( self.cd_counts + self.dd_counts ) and self.dc_counts >= ( self.dc_counts + self.cc_counts @@ -648,6 +677,7 @@ def try_return(self, to_return): return D def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) + 1 if turn == 1: @@ -761,6 +791,7 @@ class SecondByCave(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) + 1 if turn == 1: return C @@ -811,6 +842,7 @@ class SecondByWmAdams(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(self.history) <= 1: return C number_defects = opponent.defections @@ -867,6 +899,7 @@ def update_score(self, opponent: Player): self.own_score += game.score(last_round)[0] def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if self.mode == "Defect": return D @@ -956,6 +989,7 @@ def try_return(self, to_return): return to_return def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(opponent.history) == 0: return C @@ -969,7 +1003,10 @@ def strategy(self, opponent: Player) -> Action: if self.forgive_flag: self.forgive_flag = False self.defect_padding = 0 - if self.grudge < len(self.history) + 1 and opponent.history[-1] == D: + if ( + self.grudge < len(self.history) + 1 + and opponent.history[-1] == D + ): # Then override self.grudge += 20 return self.try_return(C) @@ -1095,7 +1132,9 @@ def __init__(self): self.mode = "Normal" self.recorded_defects = 0 # Count opponent defects after turn 1 self.exit_defect_meter = 0 # When >= 11, then exit defect mode. - self.coops_in_first_36 = None # On turn 37, count cooperations in first 36 + self.coops_in_first_36 = ( + None # On turn 37, count cooperations in first 36 + ) self.was_defective = False # Previously in Defect mode self.prob = 0.25 # After turn 37, probability that we'll defect @@ -1105,7 +1144,9 @@ def __init__(self): self.more_coop = 0 # This schedules cooperation for future turns # Initial last_generous_n_turns_ago to 3 because this counts up and # triggers a strategy change at 2. 
- self.last_generous_n_turns_ago = 3 # How many turns ago was a "generous" move + self.last_generous_n_turns_ago = ( + 3 # How many turns ago was a "generous" move + ) self.burned = False self.defect_streak = 0 @@ -1114,7 +1155,9 @@ def __init__(self): 0, ] # Counters that get (almost) alternately incremented. self.parity_bit = 0 # Which parity_streak to increment - self.parity_limit = 5 # When a parity streak hits this limit, alter strategy. + self.parity_limit = ( + 5 # When a parity streak hits this limit, alter strategy. + ) self.parity_hits = 0 # Counts how many times a parity_limit was hit. # After hitting parity_hits 8 times, lower parity_limit to 3. @@ -1153,7 +1196,9 @@ def calculate_chi_squared(self, turn): denom = turn - 2 expected_matrix = ( - np.outer(self.move_history.sum(axis=1), self.move_history.sum(axis=0)) + np.outer( + self.move_history.sum(axis=1), self.move_history.sum(axis=0) + ) / denom ) @@ -1162,7 +1207,9 @@ for j in range(2): expect = expected_matrix[i, j] if expect > 1.0: - chi_squared += (expect - self.move_history[i, j]) ** 2 / expect + chi_squared += ( + expect - self.move_history[i, j] + ) ** 2 / expect return chi_squared @@ -1184,7 +1231,10 @@ def detect_random(self, turn): if self.move_history[0, 0] / denom >= 0.8: return False - if self.recorded_defects / denom < 0.25 or self.recorded_defects / denom > 0.75: + if ( + self.recorded_defects / denom < 0.25 + or self.recorded_defects / denom > 0.75 + ): return False if self.calculate_chi_squared(turn) > 3: @@ -1224,6 +1274,7 @@ def detect_parity_streak(self, last_move): return True def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) + 1 if turn == 1: @@ -1276,7 +1327,11 @@ def strategy(self, opponent: Player) -> Action: # Only enter Fair-weather mode if the opponent Cooperated the first 37 # turns then Defected on the 38th. - if turn == 38 and opponent.history[-1] == D and opponent.cooperations == 36: + if ( + turn == 38 + and opponent.history[-1] == D + and opponent.cooperations == 36 + ): self.mode = "Fair-weather" return self.try_return(to_return=C, lower_flags=False) @@ -1297,7 +1352,9 @@ def strategy(self, opponent: Player) -> Action: self.parity_streak[ self.parity_bit ] = 0 # Reset `parity_streak` when we hit the limit. - self.parity_hits += 1 # Keep track of how many times we hit the limit. + self.parity_hits += ( + 1 # Keep track of how many times we hit the limit. + ) if self.parity_hits >= 8: # After 8 times, lower the limit. self.parity_limit = 3 return self.try_return( @@ -1389,6 +1446,7 @@ def _score_last_round(self, opponent: Player): self.opponent_score += scores[1] def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" current_round = len(self.history) + 1 if current_round == 1: @@ -1463,6 +1521,7 @@ def __init__(self) -> None: self.flack = 0.0 # The relative untrustworthiness of opponent def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not opponent.history: return C @@ -1515,13 +1574,16 @@ def __init__(self) -> None: } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" recent_history = [C, C, C] # Default to C.
for go_back in range(1, 4): if len(opponent.history) >= go_back: recent_history[-go_back] = opponent.history[-go_back] return self._random.random_choice( - self.prob_coop[(recent_history[-3], recent_history[-2], recent_history[-1])] + self.prob_coop[ + (recent_history[-3], recent_history[-2], recent_history[-1]) + ] ) @@ -1551,6 +1613,7 @@ class SecondByWhite(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) + 1 if turn <= 10 or opponent.history[-1] == C: @@ -1594,6 +1657,7 @@ def __init__(self) -> None: self.prob_coop = {0: 1.0, 1: 1.0, 2: 0.88, 3: 0.68, 4: 0.4, 5: 0.04} def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(opponent.history) < 5: return C @@ -1671,6 +1735,7 @@ def __init__(self) -> None: self.def_after_ab_count = 2 def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) + 1 if turn == 1: return C @@ -1704,7 +1769,10 @@ def strategy(self, opponent: Player) -> Action: else: self.def_after_ab_count += 1 self.streak_needed = ( - np.floor(20.0 * self.def_after_ab_count / self.coop_after_ab_count) + 1 + np.floor( + 20.0 * self.def_after_ab_count / self.coop_after_ab_count + ) + + 1 ) self.current_streak = 0 return C @@ -1790,6 +1858,7 @@ def try_return(self, to_return, opp_def): return to_return def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) + 1 if turn == 1: return self.try_return(C, 0) @@ -1804,7 +1873,9 @@ def strategy(self, opponent: Player) -> Action: # Update history if turn >= 3: - self.count_them_us_them[(them_three_ago, us_two_ago, them_two_ago)] += 1 + self.count_them_us_them[ + (them_three_ago, us_two_ago, them_two_ago) + ] += 1 if ( self.count_them_us_them[(them_two_ago, us_last, C)] @@ -1866,7 +1937,9 @@ def __init__(self) -> None: (10, D, 7, C), ) - super().__init__(transitions=transitions, initial_state=0, initial_action=C) + super().__init__( + transitions=transitions, initial_state=0, initial_action=C + ) class SecondByMikkelson(FSMPlayer): @@ -1907,6 +1980,7 @@ def __init__(self) -> None: self.credit = 7 def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) + 1 if turn == 1: return C @@ -1991,6 +2065,7 @@ def _score_last_round(self, opponent: Player): self.opponent_score += scores[1] def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) + 1 if turn > 1: self._score_last_round(opponent) @@ -2081,6 +2156,7 @@ def __init__(self) -> None: self.first_opp_def = False def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) + 1 us_two_turns_ago = C if turn <= 2 else self.history[-2] @@ -2100,6 +2176,8 @@ def strategy(self, opponent: Player) -> Action: # Calculate the probability that the opponent cooperated last turn given # what we know two turns ago. 
- prob_coop = self.opp_c_after_x[us_two_turns_ago] / self.total_num_of_x[ - us_two_turns_ago] + prob_coop = ( + self.opp_c_after_x[us_two_turns_ago] + / self.total_num_of_x[us_two_turns_ago] + ) return self._random.random_choice(prob_coop) diff --git a/axelrod/strategies/backstabber.py b/axelrod/strategies/backstabber.py index 0d9d821d9..d43d786c8 100644 --- a/axelrod/strategies/backstabber.py +++ b/axelrod/strategies/backstabber.py @@ -27,6 +27,7 @@ class BackStabber(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" return _backstabber_strategy(opponent) @@ -56,6 +57,7 @@ class DoubleCrosser(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if _opponent_triggers_alt_strategy(opponent): return _alt_strategy(opponent) return _backstabber_strategy(opponent) @@ -97,7 +99,9 @@ def _opponent_triggers_alt_strategy(opponent: Player) -> bool: return before_alt_strategy < current_round <= last_round_of_alt_strategy -def _opponent_defected_in_first_n_rounds(opponent: Player, first_n_rounds: int) -> bool: +def _opponent_defected_in_first_n_rounds( + opponent: Player, first_n_rounds: int +) -> bool: """ If opponent defected in the first N rounds, return True. Else return False. """ diff --git a/axelrod/strategies/better_and_better.py b/axelrod/strategies/better_and_better.py index 5a231187f..89fa96110 100644 --- a/axelrod/strategies/better_and_better.py +++ b/axelrod/strategies/better_and_better.py @@ -25,6 +25,7 @@ class BetterAndBetter(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" current_round = len(self.history) + 1 probability = current_round / 1000 return self._random.random_choice(probability) diff --git a/axelrod/strategies/bush_mosteller.py b/axelrod/strategies/bush_mosteller.py index 3fed4127b..268cacea3 100644 --- a/axelrod/strategies/bush_mosteller.py +++ b/axelrod/strategies/bush_mosteller.py @@ -62,7 +62,10 @@ def __init__( self._c_prob, self._d_prob = c_prob, d_prob self._init_c_prob, self._init_d_prob = c_prob, d_prob self._aspiration_level = abs( - (max(self.match_attributes["game"].RPST()) / aspiration_level_divider) + ( + max(self.match_attributes["game"].RPST()) + / aspiration_level_divider + ) ) self._stimulus = 0.0 @@ -104,7 +107,9 @@ def stimulus_update(self, opponent: Player): ) elif self._stimulus < 0: - self._c_prob += self._learning_rate * self._stimulus * self._c_prob + self._c_prob += ( + self._learning_rate * self._stimulus * self._c_prob + ) # Updates probability following previous choice D if self.history[-1] == D: @@ -114,15 +119,22 @@ def stimulus_update(self, opponent: Player): ) elif self._stimulus < 0: - self._d_prob += self._learning_rate * self._stimulus * self._d_prob + self._d_prob += ( + self._learning_rate * self._stimulus * self._d_prob + ) def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # First turn if len(self.history) == 0: - return self._random.random_choice(self._c_prob / (self._c_prob + self._d_prob)) + return self._random.random_choice( + self._c_prob / (self._c_prob + self._d_prob) + ) # Updating stimulus depending on his own latest choice self.stimulus_update(opponent) - return self._random.random_choice(self._c_prob / (self._c_prob + self._d_prob)) + return self._random.random_choice( + self._c_prob / (self._c_prob + self._d_prob) + ) diff --git 
a/axelrod/strategies/calculator.py b/axelrod/strategies/calculator.py index 05cdba01e..8b2546592 100644 --- a/axelrod/strategies/calculator.py +++ b/axelrod/strategies/calculator.py @@ -37,10 +37,12 @@ def set_seed(self, seed: int = None): self.joss_instance.set_seed(seed) def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) if turn > 0: - self.joss_instance.history.append(self.history[-1], - opponent.history[-1]) + self.joss_instance.history.append( + self.history[-1], opponent.history[-1] + ) if turn == 20: self.cycle = detect_cycle(opponent.history) return self.extended_strategy(opponent) diff --git a/axelrod/strategies/cooperator.py b/axelrod/strategies/cooperator.py index 61d4fd4ca..63899e3a4 100644 --- a/axelrod/strategies/cooperator.py +++ b/axelrod/strategies/cooperator.py @@ -26,6 +26,7 @@ class Cooperator(Player): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" return C @@ -59,8 +60,9 @@ def strategy(self, opponent: Player) -> Action: After 3 rounds, if opponent has not defected to a max history depth of 10, defect. """ - if self._has_played_enough_rounds_to_be_tricky() and self._opponents_has_cooperated_enough_to_be_tricky( - opponent + if ( + self._has_played_enough_rounds_to_be_tricky() + and self._opponents_has_cooperated_enough_to_be_tricky(opponent) ): return D return C diff --git a/axelrod/strategies/cycler.py b/axelrod/strategies/cycler.py index bff8c83d4..ac2be686a 100644 --- a/axelrod/strategies/cycler.py +++ b/axelrod/strategies/cycler.py @@ -45,6 +45,7 @@ def _get_first_three() -> List[Action]: return [C, D, D] def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" while self.first_three: return self.first_three.pop(0) if self.cycle_counter < self.cycle_length: @@ -91,6 +92,7 @@ def __init__(self, cycle: str = "CCD") -> None: self.set_cycle(cycle=cycle) def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" return next(self.cycle_iter) def set_cycle(self, cycle: str): @@ -111,23 +113,25 @@ def __init__( cycle_length: int = None, mutation_probability: float = 0.2, mutation_potency: int = 1, - seed: int = None + seed: int = None, ) -> None: EvolvablePlayer.__init__(self, seed=seed) cycle, cycle_length = self._normalize_parameters(cycle, cycle_length) Cycler.__init__(self, cycle=cycle) # Overwrite init_kwargs in the case that we generated a new cycle from cycle_length - self.overwrite_init_kwargs( - cycle=cycle, - cycle_length=cycle_length) + self.overwrite_init_kwargs(cycle=cycle, cycle_length=cycle_length) self.mutation_probability = mutation_probability self.mutation_potency = mutation_potency - def _normalize_parameters(self, cycle=None, cycle_length=None) -> Tuple[str, int]: + def _normalize_parameters( + self, cycle=None, cycle_length=None + ) -> Tuple[str, int]: """Compute other parameters from those that may be missing, to ensure proper cloning.""" if not cycle: if not cycle_length: - raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableCycler") + raise InsufficientParametersError( + "Insufficient Parameters to instantiate EvolvableCycler" + ) cycle = self._generate_random_cycle(cycle_length) cycle_length = len(cycle) return cycle, cycle_length @@ -136,7 +140,9 @@ def _generate_random_cycle(self, cycle_length: int) -> str: """ Generate a sequence of 
random moves """ - return actions_to_str(self._random.choice(actions) for _ in range(cycle_length)) + return actions_to_str( + self._random.choice(actions) for _ in range(cycle_length) + ) def mutate(self) -> EvolvablePlayer: """ @@ -145,8 +151,12 @@ def mutate(self) -> EvolvablePlayer: if self._random.random() <= self.mutation_probability: mutated_sequence = list(str_to_actions(self.cycle)) for _ in range(self.mutation_potency): - index_to_change = self._random.randint(0, len(mutated_sequence) - 1) - mutated_sequence[index_to_change] = mutated_sequence[index_to_change].flip() + index_to_change = self._random.randint( + 0, len(mutated_sequence) - 1 + ) + mutated_sequence[index_to_change] = mutated_sequence[ + index_to_change + ].flip() cycle = actions_to_str(mutated_sequence) else: cycle = self.cycle @@ -158,7 +168,9 @@ def crossover(self, other) -> EvolvablePlayer: Creates and returns a new Player instance with a single crossover point. """ if other.__class__ != self.__class__: - raise TypeError("Crossover must be between the same player classes.") + raise TypeError( + "Crossover must be between the same player classes." + ) cycle_list = crossover_lists(self.cycle, other.cycle, self._random) cycle = "".join(cycle_list) cycle, _ = self._normalize_parameters(cycle) diff --git a/axelrod/strategies/darwin.py b/axelrod/strategies/darwin.py index 974c6f575..c5fe2e00f 100644 --- a/axelrod/strategies/darwin.py +++ b/axelrod/strategies/darwin.py @@ -60,6 +60,7 @@ def foil_strategy_inspection() -> Action: return C def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" trial = len(self.history) if trial > 0: diff --git a/axelrod/strategies/dbs.py b/axelrod/strategies/dbs.py index 8e7916718..720c7fdb2 100644 --- a/axelrod/strategies/dbs.py +++ b/axelrod/strategies/dbs.py @@ -210,6 +210,7 @@ def compute_prob_rule(self, outcome, alpha=1): return p_cond def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # First move if not self.history: return C @@ -266,7 +267,9 @@ def strategy(self, opponent: Player) -> Action: if r_minus_in_Rd: self.v += 1 # If the number of violations is superior to a threshold, clean Rd. - if (self.v > self.reject_threshold) or (r_plus_in_Rc and r_minus_in_Rd): + if (self.v > self.reject_threshold) or ( + r_plus_in_Rc and r_minus_in_Rd + ): self.Rd.clear() self.v = 0 @@ -274,7 +277,9 @@ def strategy(self, opponent: Player) -> Action: Rp = {} all_cond = [(C, C), (C, D), (D, C), (D, D)] for outcome in all_cond: - if (outcome not in self.Rc.keys()) and (outcome not in self.Rd.keys()): + if (outcome not in self.Rc.keys()) and ( + outcome not in self.Rd.keys() + ): # Compute opponent's C answer probability. Rp[outcome] = self.compute_prob_rule(outcome, self.alpha) @@ -327,8 +332,12 @@ def get_siblings(self): siblings which are DeterministicNodes, their depth is equal to current node depth's + 1. """ - opponent_c_choice = DeterministicNode(self.own_action, C, self.depth + 1) - opponent_d_choice = DeterministicNode(self.own_action, D, self.depth + 1) + opponent_c_choice = DeterministicNode( + self.own_action, C, self.depth + 1 + ) + opponent_d_choice = DeterministicNode( + self.own_action, D, self.depth + 1 + ) return opponent_c_choice, opponent_d_choice def is_stochastic(self): @@ -354,8 +363,12 @@ def get_siblings(self, policy): of the same depth as the current node. Their probabilities pC are defined by the policy argument. 
""" - c_choice = StochasticNode(C, policy[(self.action1, self.action2)], self.depth) - d_choice = StochasticNode(D, policy[(self.action1, self.action2)], self.depth) + c_choice = StochasticNode( + C, policy[(self.action1, self.action2)], self.depth + ) + d_choice = StochasticNode( + D, policy[(self.action1, self.action2)], self.depth + ) return c_choice, d_choice def is_stochastic(self): @@ -401,7 +414,9 @@ def minimax_tree_search(begin_node, policy, max_depth): # The stochastic node value is the expected value of siblings. node_value = begin_node.pC * minimax_tree_search( siblings[0], policy, max_depth - ) + (1 - begin_node.pC) * minimax_tree_search(siblings[1], policy, max_depth) + ) + (1 - begin_node.pC) * minimax_tree_search( + siblings[1], policy, max_depth + ) return node_value else: # Deterministic node if begin_node.depth == max_depth: @@ -433,7 +448,9 @@ def move_gen(outcome, policy, depth_search_tree=5): using tree-search procedure. """ current_node = DeterministicNode(outcome[0], outcome[1], depth=0) - values_of_choices = minimax_tree_search(current_node, policy, depth_search_tree) + values_of_choices = minimax_tree_search( + current_node, policy, depth_search_tree + ) # Returns the Action which correspond to the best choice in terms of # expected value. In case value(C) == value(D), returns C. actions_tuple = (C, D) diff --git a/axelrod/strategies/defector.py b/axelrod/strategies/defector.py index 7b4831443..f52637ba0 100644 --- a/axelrod/strategies/defector.py +++ b/axelrod/strategies/defector.py @@ -26,6 +26,7 @@ class Defector(Player): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" return D @@ -54,6 +55,9 @@ def strategy(self, opponent: Player) -> Action: Defect if opponent has cooperated at least once in the past and has defected for the last 3 turns in a row. """ - if opponent.history.cooperations > 0 and opponent.history[-3:] == [D] * 3: + if ( + opponent.history.cooperations > 0 + and opponent.history[-3:] == [D] * 3 + ): return C return D diff --git a/axelrod/strategies/doubler.py b/axelrod/strategies/doubler.py index 4ed6b5ad0..67a7d87f3 100644 --- a/axelrod/strategies/doubler.py +++ b/axelrod/strategies/doubler.py @@ -25,6 +25,7 @@ class Doubler(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not self.history: return C if ( diff --git a/axelrod/strategies/finite_state_machines.py b/axelrod/strategies/finite_state_machines.py index 29fa1a291..b75cf090f 100644 --- a/axelrod/strategies/finite_state_machines.py +++ b/axelrod/strategies/finite_state_machines.py @@ -70,7 +70,9 @@ def state_transitions(self) -> dict: return self._state_transitions.copy() def transitions(self) -> list: - return [[x[0], x[1], y[0], y[1]] for x, y in self._state_transitions.items()] + return [ + [x[0], x[1], y[0], y[1]] for x, y in self._state_transitions.items() + ] def move(self, opponent_action: Action) -> Action: """Computes the response move and changes state.""" @@ -112,7 +114,7 @@ def __init__( self, transitions: Tuple[Transition, ...] 
= ((1, C, 1, C), (1, D, 1, D)), initial_state: int = 1, - initial_action: Action = C + initial_action: Action = C, ) -> None: Player.__init__(self) self.initial_state = initial_state @@ -120,6 +122,7 @@ def __init__( self.fsm = SimpleFSM(transitions, initial_state) def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(self.history) == 0: return self.initial_action else: @@ -147,39 +150,62 @@ def __init__( initial_action: Action = None, num_states: int = None, mutation_probability: float = 0.1, - seed: int = None + seed: int = None, ) -> None: """If transitions, initial_state, and initial_action are None then generate random parameters using num_states.""" EvolvablePlayer.__init__(self, seed=seed) - transitions, initial_state, initial_action, num_states = self._normalize_parameters( - transitions, initial_state, initial_action, num_states) + ( + transitions, + initial_state, + initial_action, + num_states, + ) = self._normalize_parameters( + transitions, initial_state, initial_action, num_states + ) FSMPlayer.__init__( self, transitions=transitions, initial_state=initial_state, - initial_action=initial_action) + initial_action=initial_action, + ) self.mutation_probability = mutation_probability self.overwrite_init_kwargs( transitions=transitions, initial_state=initial_state, initial_action=initial_action, - num_states=self.num_states) + num_states=self.num_states, + ) @classmethod - def normalize_transitions(cls, transitions: Sequence[Sequence]) -> Tuple[Tuple[Any, ...], ...]: + def normalize_transitions( + cls, transitions: Sequence[Sequence] + ) -> Tuple[Tuple[Any, ...], ...]: """Translate a list of lists to a tuple of tuples.""" normalized = [] for t in transitions: normalized.append(tuple(t)) return tuple(normalized) - def _normalize_parameters(self, transitions: Tuple = None, initial_state: int = None, initial_action: Action = None, - num_states: int = None) -> Tuple[Tuple, int, Action, int]: - if not ((transitions is not None) and (initial_state is not None) and (initial_action is not None)): + def _normalize_parameters( + self, + transitions: Tuple = None, + initial_state: int = None, + initial_action: Action = None, + num_states: int = None, + ) -> Tuple[Tuple, int, Action, int]: + if not ( + (transitions is not None) + and (initial_state is not None) + and (initial_action is not None) + ): if not num_states: - raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableFSMPlayer") - transitions, initial_state, initial_action = self.random_params(num_states) + raise InsufficientParametersError( + "Insufficient Parameters to instantiate EvolvableFSMPlayer" + ) + transitions, initial_state, initial_action = self.random_params( + num_states + ) transitions = self.normalize_transitions(transitions) num_states = len(transitions) // 2 return transitions, initial_state, initial_action, num_states @@ -188,7 +214,9 @@ def _normalize_parameters(self, transitions: Tuple = None, initial_state: int = def num_states(self) -> int: return self.fsm.num_states() - def random_params(self, num_states: int) -> Tuple[Tuple[Transition, ...], int, Action]: + def random_params( + self, num_states: int + ) -> Tuple[Tuple[Transition, ...], int, Action]: rows = [] for j in range(num_states): for action in actions: @@ -225,10 +253,14 @@ def mutate(self): if self._random.random() < self.mutation_probability / 10: initial_action = self.initial_action.flip() initial_state = self.initial_state - if self._random.random() < 
self.mutation_probability / (10 * self.num_states): + if self._random.random() < self.mutation_probability / ( + 10 * self.num_states + ): initial_state = self._random.randint(0, self.num_states) try: - transitions = self.mutate_rows(self.fsm.transitions(), self.mutation_probability) + transitions = self.mutate_rows( + self.fsm.transitions(), self.mutation_probability + ) self.fsm = SimpleFSM(transitions, self.initial_state) except ValueError: # If the FSM is malformed, try again. @@ -239,7 +271,9 @@ def mutate(self): initial_action=initial_action, ) - def crossover_rows(self, rows1: List[List], rows2: List[List]) -> List[List]: + def crossover_rows( + self, rows1: List[List], rows2: List[List] + ) -> List[List]: num_states = len(rows1) // 2 cross_point = 2 * self._random.randint(0, num_states) new_rows = copy_lists(rows1[:cross_point]) @@ -248,8 +282,12 @@ def crossover_rows(self, rows1: List[List], rows2: List[List]) -> List[List]: def crossover(self, other): if other.__class__ != self.__class__: - raise TypeError("Crossover must be between the same player classes.") - transitions = self.crossover_rows(self.fsm.transitions(), other.fsm.transitions()) + raise TypeError( + "Crossover must be between the same player classes." + ) + transitions = self.crossover_rows( + self.fsm.transitions(), other.fsm.transitions() + ) transitions = self.normalize_transitions(transitions) return self.create_new(transitions=transitions) @@ -267,22 +305,28 @@ def receive_vector(self, vector): Finally, a probability to determine the player's first move. """ num_states = self.fsm.num_states() - state_scale = vector[:num_states * 2] + state_scale = vector[: num_states * 2] next_states = [int(s * (num_states - 1)) for s in state_scale] - actions = vector[num_states * 2: -1] + actions = vector[num_states * 2 : -1] self.initial_action = C if round(vector[-1]) == 0 else D self.initial_state = 1 transitions = [] - for i, (initial_state, action) in enumerate(itertools.product(range(num_states), [C, D])): + for i, (initial_state, action) in enumerate( + itertools.product(range(num_states), [C, D]) + ): next_action = C if round(actions[i]) == 0 else D - transitions.append([initial_state, action, next_states[i], next_action]) + transitions.append( + [initial_state, action, next_states[i], next_action] + ) transitions = self.normalize_transitions(transitions) self.fsm = SimpleFSM(transitions, self.initial_state) - self.overwrite_init_kwargs(transitions=transitions, - initial_state=self.initial_state, - initial_action=self.initial_action) + self.overwrite_init_kwargs( + transitions=transitions, + initial_state=self.initial_state, + initial_action=self.initial_action, + ) def create_vector_bounds(self): """Creates the bounds for the decision variables.""" diff --git a/axelrod/strategies/gambler.py b/axelrod/strategies/gambler.py index 3e50a76b9..4260aa40f 100644 --- a/axelrod/strategies/gambler.py +++ b/axelrod/strategies/gambler.py @@ -43,6 +43,7 @@ class Gambler(LookerUp): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" actions_or_float = super(Gambler, self).strategy(opponent) if isinstance(actions_or_float, Action): return actions_or_float @@ -59,7 +60,7 @@ def __init__( pattern: Any = None, # pattern is str or tuple of Actions. 
parameters: Plays = None, mutation_probability: float = None, - seed: int = None + seed: int = None, ) -> None: EvolvableLookerUp.__init__( self, @@ -68,7 +69,7 @@ def __init__( pattern=pattern, parameters=parameters, mutation_probability=mutation_probability, - seed=seed + seed=seed, ) self.pattern = list(self.pattern) Gambler.__init__( @@ -76,7 +77,7 @@ def __init__( lookup_dict=self.lookup_dict, initial_actions=self.initial_actions, pattern=self.pattern, - parameters=self.parameters + parameters=self.parameters, ) self.overwrite_init_kwargs( lookup_dict=self.lookup_dict, @@ -105,7 +106,9 @@ def receive_vector(self, vector): """Receives a vector and updates the player's pattern. Ignores extra parameters.""" self.pattern = vector self_depth, op_depth, op_openings_depth = self.parameters - self._lookup = LookupTable.from_pattern(self.pattern, self_depth, op_depth, op_openings_depth) + self._lookup = LookupTable.from_pattern( + self.pattern, self_depth, op_depth, op_openings_depth + ) def create_vector_bounds(self): """Creates the bounds for the decision variables. Ignores extra parameters.""" diff --git a/axelrod/strategies/gobymajority.py b/axelrod/strategies/gobymajority.py index 7617b571f..9a7692c5c 100644 --- a/axelrod/strategies/gobymajority.py +++ b/axelrod/strategies/gobymajority.py @@ -60,7 +60,9 @@ def __init__( self.memory = self.classifier["memory_depth"] else: self.memory = 0 - self.name = "Go By Majority" + (self.memory > 0) * (": %i" % self.memory) + self.name = "Go By Majority" + (self.memory > 0) * ( + ": %i" % self.memory + ) if self.soft: self.name = "Soft " + self.name else: diff --git a/axelrod/strategies/gradualkiller.py b/axelrod/strategies/gradualkiller.py index d421d06a2..d964ea040 100644 --- a/axelrod/strategies/gradualkiller.py +++ b/axelrod/strategies/gradualkiller.py @@ -31,6 +31,7 @@ class GradualKiller(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if opponent.history[5:7] == [D, D]: return D return C diff --git a/axelrod/strategies/grudger.py b/axelrod/strategies/grudger.py index c4e197e1c..4aca2c40a 100644 --- a/axelrod/strategies/grudger.py +++ b/axelrod/strategies/grudger.py @@ -134,6 +134,7 @@ class Aggravater(Player): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(opponent.history) < 3: return D elif opponent.defections: @@ -167,8 +168,7 @@ def __init__(self) -> None: self.grudge_memory = 0 def strategy(self, opponent: Player) -> Action: - """Begins by playing C, then plays D, D, D, D, C, C against a defection - """ + """Begins by playing C, then plays D, D, D, D, C, C against a defection""" if self.grudged: strategy = [D, D, D, C, C][self.grudge_memory] self.grudge_memory += 1 diff --git a/axelrod/strategies/handshake.py b/axelrod/strategies/handshake.py index 41e02ff13..6e24ee1cd 100644 --- a/axelrod/strategies/handshake.py +++ b/axelrod/strategies/handshake.py @@ -32,6 +32,7 @@ def __init__(self, initial_plays: List[Action] = None) -> None: self.initial_plays = initial_plays def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # Begin by playing the sequence C, D index = len(self.history) if index < len(self.initial_plays): diff --git a/axelrod/strategies/hmm.py b/axelrod/strategies/hmm.py index ad64ddb55..f91fd31ad 100644 --- a/axelrod/strategies/hmm.py +++ b/axelrod/strategies/hmm.py @@ -59,7 +59,11 @@ class SimpleHMM(object): 
""" def __init__( - self, transitions_C, transitions_D, emission_probabilities, initial_state + self, + transitions_C, + transitions_D, + emission_probabilities, + initial_state, ) -> None: """ Params @@ -133,13 +137,17 @@ def move(self, opponent_action: Action) -> Action: next_state = self._cache_C[self.state] except KeyError: num_states = len(self.emission_probabilities) - next_state = self._random.choice(num_states, 1, p=self.transitions_C[self.state])[0] + next_state = self._random.choice( + num_states, 1, p=self.transitions_C[self.state] + )[0] else: try: next_state = self._cache_D[self.state] except KeyError: num_states = len(self.emission_probabilities) - next_state = self._random.choice(num_states, 1, p=self.transitions_D[self.state])[0] + next_state = self._random.choice( + num_states, 1, p=self.transitions_D[self.state] + )[0] self.state = next_state # Choose action to emit. @@ -178,7 +186,7 @@ def __init__( transitions_D=None, emission_probabilities=None, initial_state=0, - initial_action=C + initial_action=C, ) -> None: Player.__init__(self) if not transitions_C: @@ -189,7 +197,10 @@ def __init__( self.initial_state = initial_state self.initial_action = initial_action self.hmm = SimpleHMM( - copy_lists(transitions_C), copy_lists(transitions_D), list(emission_probabilities), initial_state + copy_lists(transitions_C), + copy_lists(transitions_D), + list(emission_probabilities), + initial_state, ) assert self.hmm.is_well_formed() self.state = self.hmm.state @@ -208,6 +219,7 @@ def is_stochastic(self) -> bool: return False def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(self.history) == 0: return self.initial_action else: @@ -230,6 +242,7 @@ def set_seed(self, seed=None): class EvolvableHMMPlayer(HMMPlayer, EvolvablePlayer): """Evolvable version of HMMPlayer.""" + name = "EvolvableHMMPlayer" def __init__( @@ -241,18 +254,35 @@ def __init__( initial_action=C, num_states=None, mutation_probability=None, - seed: int = None + seed: int = None, ) -> None: EvolvablePlayer.__init__(self, seed=seed) - transitions_C, transitions_D, emission_probabilities, initial_state, initial_action, num_states, mutation_probability = self._normalize_parameters( - transitions_C, transitions_D, emission_probabilities, initial_state, initial_action, num_states, mutation_probability) + ( + transitions_C, + transitions_D, + emission_probabilities, + initial_state, + initial_action, + num_states, + mutation_probability, + ) = self._normalize_parameters( + transitions_C, + transitions_D, + emission_probabilities, + initial_state, + initial_action, + num_states, + mutation_probability, + ) self.mutation_probability = mutation_probability - HMMPlayer.__init__(self, - transitions_C=transitions_C, - transitions_D=transitions_D, - emission_probabilities=emission_probabilities, - initial_state=initial_state, - initial_action=initial_action) + HMMPlayer.__init__( + self, + transitions_C=transitions_C, + transitions_D=transitions_D, + emission_probabilities=emission_probabilities, + initial_state=initial_state, + initial_action=initial_action, + ) self.hmm._random = self._random self.overwrite_init_kwargs( transitions_C=transitions_C, @@ -261,16 +291,35 @@ def __init__( initial_state=initial_state, initial_action=initial_action, num_states=num_states, - mutation_probability=mutation_probability + mutation_probability=mutation_probability, ) - def _normalize_parameters(self, transitions_C=None, transitions_D=None, emission_probabilities=None, - 
initial_state=None, initial_action=None, num_states=None, mutation_probability=None): - if not ((transitions_C and transitions_D and emission_probabilities) and (initial_state is not None) and (initial_action is not None)): + def _normalize_parameters( + self, + transitions_C=None, + transitions_D=None, + emission_probabilities=None, + initial_state=None, + initial_action=None, + num_states=None, + mutation_probability=None, + ): + if not ( + (transitions_C and transitions_D and emission_probabilities) + and (initial_state is not None) + and (initial_action is not None) + ): if not num_states: - raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableHMMPlayer") - transitions_C, transitions_D, emission_probabilities, initial_state, initial_action = self.random_params( - num_states) + raise InsufficientParametersError( + "Insufficient Parameters to instantiate EvolvableHMMPlayer" + ) + ( + transitions_C, + transitions_D, + emission_probabilities, + initial_state, + initial_action, + ) = self.random_params(num_states) # Normalize types of various matrices for m in [transitions_C, transitions_D]: for i in range(len(m)): @@ -281,7 +330,15 @@ def _normalize_parameters(self, transitions_C=None, transitions_D=None, emission mutation_probability = 10 / (num_states ** 2) else: mutation_probability = mutation_probability - return transitions_C, transitions_D, emission_probabilities, initial_state, initial_action, num_states, mutation_probability + return ( + transitions_C, + transitions_D, + emission_probabilities, + initial_state, + initial_action, + num_states, + mutation_probability, + ) def random_params(self, num_states): transitions_C = [] @@ -293,7 +350,13 @@ def random_params(self, num_states): emission_probabilities.append(self._random.random()) initial_state = self._random.randint(0, num_states) initial_action = C - return transitions_C, transitions_D, emission_probabilities, initial_state, initial_action + return ( + transitions_C, + transitions_D, + emission_probabilities, + initial_state, + initial_action, + ) @property def num_states(self): @@ -307,16 +370,23 @@ def mutate_rows(self, rows, mutation_probability): def mutate(self): transitions_C = self.mutate_rows( - self.hmm.transitions_C, self.mutation_probability) + self.hmm.transitions_C, self.mutation_probability + ) transitions_D = self.mutate_rows( - self.hmm.transitions_D, self.mutation_probability) + self.hmm.transitions_D, self.mutation_probability + ) emission_probabilities = mutate_row( - self.hmm.emission_probabilities, self.mutation_probability, self._random) + self.hmm.emission_probabilities, + self.mutation_probability, + self._random, + ) initial_action = self.initial_action if self._random.random() < self.mutation_probability / 10: initial_action = self.initial_action.flip() initial_state = self.initial_state - if self._random.random() < self.mutation_probability / (10 * self.num_states): + if self._random.random() < self.mutation_probability / ( + 10 * self.num_states + ): initial_state = self._random.randint(0, self.num_states) return self.create_new( transitions_C=transitions_C, @@ -328,15 +398,24 @@ def mutate(self): def crossover(self, other): if other.__class__ != self.__class__: - raise TypeError("Crossover must be between the same player classes.") - transitions_C = crossover_lists(self.hmm.transitions_C, other.hmm.transitions_C, self._random) - transitions_D = crossover_lists(self.hmm.transitions_D, other.hmm.transitions_D, self._random) + raise TypeError( + "Crossover must be between 
the same player classes." + ) + transitions_C = crossover_lists( + self.hmm.transitions_C, other.hmm.transitions_C, self._random + ) + transitions_D = crossover_lists( + self.hmm.transitions_D, other.hmm.transitions_D, self._random + ) emission_probabilities = crossover_lists( - self.hmm.emission_probabilities, other.hmm.emission_probabilities, self._random) + self.hmm.emission_probabilities, + other.hmm.emission_probabilities, + self._random, + ) return self.create_new( transitions_C=transitions_C, transitions_D=transitions_D, - emission_probabilities=emission_probabilities + emission_probabilities=emission_probabilities, ) def receive_vector(self, vector): @@ -353,12 +432,12 @@ class with self.num_states. entry is the initial_action. """ - assert(len(vector) == 2 * self.num_states ** 2 + self.num_states + 1) + assert len(vector) == 2 * self.num_states ** 2 + self.num_states + 1 def deserialize(vector): matrix = [] for i in range(self.num_states): - row = vector[self.num_states * i: self.num_states * (i + 1)] + row = vector[self.num_states * i : self.num_states * (i + 1)] row = normalize_vector(row) matrix.append(row) return matrix @@ -371,7 +450,7 @@ def deserialize(vector): deserialize(vector[0:break_tc]), deserialize(vector[break_tc:break_td]), normalize_vector(vector[break_td:break_ep]), - initial_state + initial_state, ) self.initial_action = C if round(vector[-1]) == 0 else D self.initial_state = initial_state diff --git a/axelrod/strategies/human.py b/axelrod/strategies/human.py index b3a0e9564..190590c12 100644 --- a/axelrod/strategies/human.py +++ b/axelrod/strategies/human.py @@ -36,7 +36,9 @@ def validate(self, document) -> None: text = document.text if text and text.upper() not in ["C", "D"]: - raise ValidationError(message="Action must be C or D", cursor_position=0) + raise ValidationError( + message="Action must be C or D", cursor_position=0 + ) class Human(Player): @@ -83,10 +85,14 @@ def _history_toolbar(self): Described at http://python-prompt-toolkit.readthedocs.io/en/latest/pages/building_prompts.html#adding-a-bottom-toolbar """ my_history = [self.symbols[action] for action in self.history] - opponent_history = [self.symbols[action] for action in self.history.coplays] + opponent_history = [ + self.symbols[action] for action in self.history.coplays + ] history = list(zip(my_history, opponent_history)) if self.history: - content = "History ({}, opponent): {}".format(self.human_name, history) + content = "History ({}, opponent): {}".format( + self.human_name, history + ) else: content = "" return content @@ -112,12 +118,14 @@ def _status_messages(self): if PROMPT2 else lambda cli: [(token_toolbar, self._history_toolbar())] ) - print_statement = "{}Turn {}: {} played {}, opponent played {}".format( - linesep, - len(self.history), - self.human_name, - self.symbols[self.history[-1]], - self.symbols[self.history.coplays[-1]], + print_statement = ( + "{}Turn {}: {} played {}, opponent played {}".format( + linesep, + len(self.history), + self.human_name, + self.symbols[self.history[-1]], + self.symbols[self.history.coplays[-1]], + ) ) else: toolbar = None diff --git a/axelrod/strategies/hunter.py b/axelrod/strategies/hunter.py index 51c6968d7..898d3a582 100644 --- a/axelrod/strategies/hunter.py +++ b/axelrod/strategies/hunter.py @@ -26,7 +26,11 @@ class DefectorHunter(Player): } def strategy(self, opponent: Player) -> Action: - if len(self.history) >= 4 and len(opponent.history) == opponent.defections: + """Actual strategy definition that determines player's action.""" + if ( + 
len(self.history) >= 4 + and len(opponent.history) == opponent.defections + ): return D return C @@ -50,7 +54,11 @@ class CooperatorHunter(Player): } def strategy(self, opponent: Player) -> Action: - if len(self.history) >= 4 and len(opponent.history) == opponent.cooperations: + """Actual strategy definition that determines player's action.""" + if ( + len(self.history) >= 4 + and len(opponent.history) == opponent.cooperations + ): return D return C @@ -85,11 +93,11 @@ def __init__(self) -> None: self.is_alt = False def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(opponent.history) < 6: return C if len(self.history) == 6: - if is_alternator(opponent.history): - self.is_alt = True + self.is_alt = is_alternator(opponent.history) if self.is_alt: return D return C @@ -119,6 +127,7 @@ def __init__(self) -> None: self.cycle = None # type: Optional[Tuple[Action]] def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if self.cycle: return D cycle = detect_cycle(opponent.history, min_size=3) @@ -140,6 +149,7 @@ class EventualCycleHunter(CycleHunter): name = "Eventual Cycle Hunter" def strategy(self, opponent: Player) -> None: + """Actual strategy definition that determines player's action.""" if len(opponent.history) < 10: return C if len(opponent.history) == opponent.cooperations: @@ -244,6 +254,8 @@ def strategy(self, opponent: Player) -> Action: probabilities.append(self.countCC / self.cooperations) if self.defections > 5: probabilities.append(self.countDD / self.defections) - if probabilities and all([abs(p - 0.5) < 0.25 for p in probabilities]): + if probabilities and all( + [abs(p - 0.5) < 0.25 for p in probabilities] + ): return D return C diff --git a/axelrod/strategies/lookerup.py b/axelrod/strategies/lookerup.py index 1f57dc685..599106e4f 100644 --- a/axelrod/strategies/lookerup.py +++ b/axelrod/strategies/lookerup.py @@ -101,7 +101,11 @@ def _raise_error_for_bad_lookup_dict(self): @classmethod def from_pattern( - cls, pattern: tuple, player_depth: int, op_depth: int, op_openings_depth: int + cls, + pattern: tuple, + player_depth: int, + op_depth: int, + op_openings_depth: int, ): keys = create_lookup_table_keys( player_depth=player_depth, @@ -151,7 +155,9 @@ def display( """ def sorter(plays): - return tuple(actions_to_str(getattr(plays, field) for field in sort_by)) + return tuple( + actions_to_str(getattr(plays, field) for field in sort_by) + ) col_width = 11 sorted_keys = sorted(self._dict, key=sorter) @@ -318,7 +324,7 @@ def __init__( lookup_dict: dict = None, initial_actions: tuple = None, pattern: Any = None, # pattern is str or tuple of Action's. - parameters: Plays = None + parameters: Plays = None, ) -> None: Player.__init__(self) @@ -361,6 +367,7 @@ def _get_initial_actions(self, initial_actions: tuple) -> tuple: return initial_actions[:table_depth] def strategy(self, opponent: Player) -> Reaction: + """Actual strategy definition that determines player's action.""" turn_index = len(opponent.history) while turn_index < len(self._initial_actions_pool): return self._initial_actions_pool[turn_index] @@ -404,11 +411,21 @@ def __init__( pattern: Any = None, # pattern is str or tuple of Action's. 
parameters: Plays = None, mutation_probability: float = None, - seed: int = None + seed: int = None, ) -> None: EvolvablePlayer.__init__(self, seed=seed) - lookup_dict, initial_actions, pattern, parameters, mutation_probability = self._normalize_parameters( - lookup_dict, initial_actions, pattern, parameters, mutation_probability + ( + lookup_dict, + initial_actions, + pattern, + parameters, + mutation_probability, + ) = self._normalize_parameters( + lookup_dict, + initial_actions, + pattern, + parameters, + mutation_probability, ) LookerUp.__init__( self, @@ -426,30 +443,50 @@ def __init__( mutation_probability=mutation_probability, ) - def _normalize_parameters(self, lookup_dict=None, initial_actions=None, pattern=None, parameters=None, - mutation_probability=None): + def _normalize_parameters( + self, + lookup_dict=None, + initial_actions=None, + pattern=None, + parameters=None, + mutation_probability=None, + ): if lookup_dict and initial_actions: # Compute the associated pattern and parameters # Map the table keys to namedTuple Plays - lookup_table = self._get_lookup_table(lookup_dict, pattern, parameters) + lookup_table = self._get_lookup_table( + lookup_dict, pattern, parameters + ) lookup_dict = lookup_table.dictionary - parameters = (lookup_table.player_depth, lookup_table.op_depth, lookup_table.op_openings_depth) + parameters = ( + lookup_table.player_depth, + lookup_table.op_depth, + lookup_table.op_openings_depth, + ) pattern = tuple(v for k, v in sorted(lookup_dict.items())) elif pattern and parameters and initial_actions: # Compute the associated lookup table plays, op_plays, op_start_plays = parameters - lookup_table = self._get_lookup_table(lookup_dict, pattern, parameters) + lookup_table = self._get_lookup_table( + lookup_dict, pattern, parameters + ) lookup_dict = lookup_table.dictionary elif parameters: # Generate a random pattern and (maybe) initial actions plays, op_plays, op_start_plays = parameters - pattern, lookup_table = self.random_params(plays, op_plays, op_start_plays) + pattern, lookup_table = self.random_params( + plays, op_plays, op_start_plays + ) lookup_dict = lookup_table.dictionary if not initial_actions: num_actions = max([plays, op_plays, op_start_plays]) - initial_actions = tuple([self._random.choice((C, D)) for _ in range(num_actions)]) + initial_actions = tuple( + [self._random.choice((C, D)) for _ in range(num_actions)] + ) else: - raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableLookerUp") + raise InsufficientParametersError( + "Insufficient Parameters to instantiate EvolvableLookerUp" + ) # Normalize pattern if isinstance(pattern, str): pattern = str_to_actions(pattern) @@ -457,8 +494,14 @@ def _normalize_parameters(self, lookup_dict=None, initial_actions=None, pattern= if mutation_probability is None: plays, op_plays, op_start_plays = parameters keys = create_lookup_table_keys(plays, op_plays, op_start_plays) - mutation_probability = 2. 
/ len(keys) - return lookup_dict, initial_actions, pattern, parameters, mutation_probability + mutation_probability = 2.0 / len(keys) + return ( + lookup_dict, + initial_actions, + pattern, + parameters, + mutation_probability, + ) def random_value(self): return self._random.choice(actions) @@ -483,7 +526,9 @@ def mutate_table(self, table, mutation_probability): return table def mutate(self): - lookup_dict = self.mutate_table(self.lookup_dict, self.mutation_probability) + lookup_dict = self.mutate_table( + self.lookup_dict, self.mutation_probability + ) # Add in starting moves initial_actions = list(self.initial_actions) for i in range(len(initial_actions)): @@ -497,8 +542,12 @@ def mutate(self): def crossover(self, other): if other.__class__ != self.__class__: - raise TypeError("Crossover must be between the same player classes.") - lookup_dict = crossover_dictionaries(self.lookup_dict, other.lookup_dict, self._random) + raise TypeError( + "Crossover must be between the same player classes." + ) + lookup_dict = crossover_dictionaries( + self.lookup_dict, other.lookup_dict, self._random + ) return self.create_new(lookup_dict=lookup_dict) @@ -515,7 +564,9 @@ class EvolvedLookerUp1_1_1(LookerUp): def __init__(self) -> None: params = Plays(self_plays=1, op_plays=1, op_openings=1) - super().__init__(parameters=params, pattern="CDDDDCDD", initial_actions=(C,)) + super().__init__( + parameters=params, pattern="CDDDDCDD", initial_actions=(C,) + ) class EvolvedLookerUp2_2_2(LookerUp): @@ -531,8 +582,12 @@ class EvolvedLookerUp2_2_2(LookerUp): def __init__(self) -> None: params = Plays(self_plays=2, op_plays=2, op_openings=2) - pattern = "CDDCDCDDCDDDCDDDDDCDCDCCCDDCCDCDDDCCCCCDDDCDDDDDDDDDCCDDCDDDCCCD" - super().__init__(parameters=params, pattern=pattern, initial_actions=(C, C)) + pattern = ( + "CDDCDCDDCDDDCDDDDDCDCDCCCDDCCDCDDDCCCCCDDDCDDDDDDDDDCCDDCDDDCCCD" + ) + super().__init__( + parameters=params, pattern=pattern, initial_actions=(C, C) + ) class Winner12(LookerUp): @@ -549,7 +604,9 @@ class Winner12(LookerUp): def __init__(self) -> None: params = Plays(self_plays=1, op_plays=2, op_openings=0) pattern = "CDCDDCDD" - super().__init__(parameters=params, pattern=pattern, initial_actions=(C, C)) + super().__init__( + parameters=params, pattern=pattern, initial_actions=(C, C) + ) class Winner21(LookerUp): @@ -566,7 +623,9 @@ class Winner21(LookerUp): def __init__(self) -> None: params = Plays(self_plays=1, op_plays=2, op_openings=0) pattern = "CDCDCDDD" - super().__init__(parameters=params, pattern=pattern, initial_actions=(D, C)) + super().__init__( + parameters=params, pattern=pattern, initial_actions=(D, C) + ) def get_last_n_plays(player: Player, depth: int) -> tuple: diff --git a/axelrod/strategies/mathematicalconstants.py b/axelrod/strategies/mathematicalconstants.py index 70295a405..5d7013256 100644 --- a/axelrod/strategies/mathematicalconstants.py +++ b/axelrod/strategies/mathematicalconstants.py @@ -25,6 +25,7 @@ class CotoDeRatio(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # Initially cooperate if len(opponent.history) == 0: return C diff --git a/axelrod/strategies/memoryone.py b/axelrod/strategies/memoryone.py index a8ad1c571..340727fbc 100644 --- a/axelrod/strategies/memoryone.py +++ b/axelrod/strategies/memoryone.py @@ -33,6 +33,7 @@ class WinStayLoseShift(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not 
self.history: return C # React to the opponent's last move @@ -66,7 +67,9 @@ class MemoryOnePlayer(Player): } def __init__( - self, four_vector: Tuple[float, float, float, float] = None, initial: Action = C + self, + four_vector: Tuple[float, float, float, float] = None, + initial: Action = C, ) -> None: """ Parameters @@ -109,7 +112,9 @@ def set_four_vector(self, four_vector: Tuple[float, float, float, float]): "An element in the probability vector, {}, is not " "between 0 and 1.".format(str(four_vector)) ) - self._four_vector = dict(zip([(C, C), (C, D), (D, C), (D, D)], four_vector)) + self._four_vector = dict( + zip([(C, C), (C, D), (D, C), (D, D)], four_vector) + ) def _post_init(self): # Adjust classifiers @@ -119,6 +124,7 @@ def _post_init(self): self.classifier["memory_depth"] = 0 def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(opponent.history) == 0: return self._initial # Determine which probability to use @@ -324,6 +330,7 @@ class ALLCorALLD(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(self.history) == 0: return self._random.random_choice(0.6) return self.history[-1] diff --git a/axelrod/strategies/memorytwo.py b/axelrod/strategies/memorytwo.py index 9573c308f..8ba2a9dfd 100644 --- a/axelrod/strategies/memorytwo.py +++ b/axelrod/strategies/memorytwo.py @@ -53,7 +53,9 @@ class MemoryTwoPlayer(Player): } def __init__( - self, sixteen_vector: Optional[Tuple[float, ...]] = None, initial: Optional[Tuple[Action, Action]] = None + self, + sixteen_vector: Optional[Tuple[float, ...]] = None, + initial: Optional[Tuple[Action, Action]] = None, ) -> None: """ Parameters @@ -69,7 +71,9 @@ def __init__( self._initial = initial self.set_initial_sixteen_vector(sixteen_vector) - def set_initial_sixteen_vector(self, sixteen_vector: Optional[Tuple[float, ...]]): + def set_initial_sixteen_vector( + self, sixteen_vector: Optional[Tuple[float, ...]] + ): if sixteen_vector is None: sixteen_vector = tuple([1] * 16) warnings.warn("Memory two player is set to default, Cooperator.") @@ -84,7 +88,8 @@ def set_sixteen_vector(self, sixteen_vector: Tuple[float, ...]): ) states = [ - (hist[:2], hist[2:]) for hist in list(itertools.product((C, D), repeat=4)) + (hist[:2], hist[2:]) + for hist in list(itertools.product((C, D), repeat=4)) ] self._sixteen_vector = dict( @@ -92,7 +97,9 @@ def set_sixteen_vector(self, sixteen_vector: Tuple[float, ...]): ) # type: Dict[tuple, float] @staticmethod - def compute_memory_depth(sixteen_vector: Dict[Tuple[Action, Action], float]) -> int: + def compute_memory_depth( + sixteen_vector: Dict[Tuple[Action, Action], float] + ) -> int: values = set(list(sixteen_vector.values())) # Memory-depth 0 @@ -119,9 +126,12 @@ def compute_memory_depth(sixteen_vector: Dict[Tuple[Action, Action], float]) -> def _post_init(self): values = set(self._sixteen_vector.values()) self.classifier["stochastic"] = any(0 < x < 1 for x in values) - self.classifier["memory_depth"] = self.compute_memory_depth(self._sixteen_vector) + self.classifier["memory_depth"] = self.compute_memory_depth( + self._sixteen_vector + ) def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) if turn <= 1: return self._initial[turn] @@ -259,12 +269,17 @@ class MEM2(Player): def __init__(self) -> None: super().__init__() - self.players = {"TFT": TitForTat(), "TFTT": TitFor2Tats(), 
"ALLD": Defector()} + self.players = { + "TFT": TitForTat(), + "TFTT": TitFor2Tats(), + "ALLD": Defector(), + } self.play_as = "TFT" self.shift_counter = 3 self.alld_counter = 0 def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # Update Histories # Note that this assumes that TFT and TFTT do not use internal counters, # Rather that they examine the actual history of play diff --git a/axelrod/strategies/meta.py b/axelrod/strategies/meta.py index 5f15b340e..d1e496254 100644 --- a/axelrod/strategies/meta.py +++ b/axelrod/strategies/meta.py @@ -104,7 +104,9 @@ def update_histories(self, coplay): # As a sanity check, look for at least one reclassifier, otherwise # this try-except clause could hide a bug. if len(self._reclassifiers) == 0: - raise TypeError("MetaClass update_histories issue, expected a reclassifier.") + raise TypeError( + "MetaClass update_histories issue, expected a reclassifier." + ) # Otherwise just update with C always, so at least the histories have the # expected length. for player in self.team: @@ -115,6 +117,7 @@ def update_history(self, play, coplay): self.update_histories(coplay) def strategy(self, opponent): + """Actual strategy definition that determines player's action.""" # Get the results of all our players. results = [] for player in self.team: @@ -234,7 +237,9 @@ def _post_init(self): # If the team has repeated identical members, then it reduces to a singular team # and it may not actually be stochastic. if team and len(set(team)) == 1: - self.classifier["stochastic"] = Classifiers["stochastic"](self.team[0]) + self.classifier["stochastic"] = Classifiers["stochastic"]( + self.team[0] + ) self.singular = True def meta_strategy(self, results, opponent): @@ -551,7 +556,9 @@ def _post_init(self): if distribution and len(set(distribution)) > 1: self.classifier["stochastic"] = True if len(self.team) == 1: - self.classifier["stochastic"] = Classifiers["stochastic"](self.team[0]) + self.classifier["stochastic"] = Classifiers["stochastic"]( + self.team[0] + ) # Overwrite strategy to avoid use of _random. This will ignore self.meta_strategy. self.index = 0 self.strategy = self.index_strategy @@ -564,7 +571,9 @@ def _post_init(self): if 1 in distribution: self.index = list(distribution).index(1) # It's potentially deterministic. - self.classifier["stochastic"] = Classifiers["stochastic"](self.team[self.index]) + self.classifier["stochastic"] = Classifiers["stochastic"]( + self.team[self.index] + ) # Overwrite strategy to avoid use of _random. This will ignore self.meta_strategy. 
self.strategy = self.index_strategy diff --git a/axelrod/strategies/mindcontrol.py b/axelrod/strategies/mindcontrol.py index e1c3fa915..979c4adee 100644 --- a/axelrod/strategies/mindcontrol.py +++ b/axelrod/strategies/mindcontrol.py @@ -18,7 +18,7 @@ class MindController(Player): "stochastic": False, "long_run_time": False, "inspects_source": False, - "manipulates_source": True, # Finds out what opponent will do + "manipulates_source": True, # Changes what opponent will do "manipulates_state": False, } @@ -62,6 +62,7 @@ def __setattr__(self, name: str, val: str): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" opponent.strategy = lambda opponent: C return D @@ -88,5 +89,6 @@ class MindBender(MindWarper): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" opponent.__dict__["strategy"] = lambda opponent: C return D diff --git a/axelrod/strategies/mutual.py b/axelrod/strategies/mutual.py index 4e31565df..25d302a35 100644 --- a/axelrod/strategies/mutual.py +++ b/axelrod/strategies/mutual.py @@ -47,6 +47,7 @@ class Hopeless(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not opponent.history: return self._random.random_choice() if self.history[-1] == C and opponent.history[-1] == C: diff --git a/axelrod/strategies/negation.py b/axelrod/strategies/negation.py index c48f56737..b6118eb02 100644 --- a/axelrod/strategies/negation.py +++ b/axelrod/strategies/negation.py @@ -25,6 +25,7 @@ class Negation(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # Random first move if not self.history: return self._random.random_choice() diff --git a/axelrod/strategies/oncebitten.py b/axelrod/strategies/oncebitten.py index db58a27bb..433c3e5d1 100644 --- a/axelrod/strategies/oncebitten.py +++ b/axelrod/strategies/oncebitten.py @@ -72,6 +72,7 @@ class FoolMeOnce(Player): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not opponent.history: return C if opponent.defections > 1: @@ -113,6 +114,7 @@ def __init__(self, forget_probability: float = 0.05) -> None: self.forget_probability = forget_probability def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" r = self._random.random() if not opponent.history: return self._initial diff --git a/axelrod/strategies/prober.py b/axelrod/strategies/prober.py index 0f5293c61..78118a91c 100644 --- a/axelrod/strategies/prober.py +++ b/axelrod/strategies/prober.py @@ -33,6 +33,7 @@ class CollectiveStrategy(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) if turn == 0: return C @@ -74,13 +75,14 @@ def __init__(self, initial_actions: List[Action] = None) -> None: self.initial_actions = initial_actions def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" hist_size = len(self.history) init_size = len(self.initial_actions) if hist_size < init_size: return self.initial_actions[hist_size] if D not in opponent.history[:init_size]: return D - return opponent.history[-1] # TFT + return opponent.history[-1] # TFT class Prober(Player): @@ -104,6 +106,7 @@ class Prober(Player): } def 
strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) if turn == 0: return D @@ -140,6 +143,7 @@ class Prober2(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) if turn == 0: return D @@ -176,6 +180,7 @@ class Prober3(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) if turn == 0: return D @@ -241,6 +246,7 @@ def __init__(self) -> None: self.turned_defector = False def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not self.history: return self.init_sequence[0] turn = len(self.history) @@ -283,6 +289,7 @@ class HardProber(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turn = len(self.history) if turn == 0: return D @@ -332,6 +339,7 @@ def __init__(self, p: float = 0.1) -> None: self.classifier["stochastic"] = False def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # First move if len(self.history) == 0: return C @@ -379,6 +387,7 @@ def __init__(self, p: float = 0.1) -> None: self.probing = False def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # First move if len(self.history) == 0: return C diff --git a/axelrod/strategies/punisher.py b/axelrod/strategies/punisher.py index 262901675..3fd587fc1 100644 --- a/axelrod/strategies/punisher.py +++ b/axelrod/strategies/punisher.py @@ -51,7 +51,9 @@ def strategy(self, opponent: Player) -> Action: return D elif D in opponent.history[-1:]: - self.mem_length = (opponent.defections * 20) // len(opponent.history) + self.mem_length = (opponent.defections * 20) // len( + opponent.history + ) self.grudged = True return D @@ -101,7 +103,9 @@ def strategy(self, opponent: Player) -> Action: self.grudge_memory += 1 return D elif D in opponent.history[-1:]: - self.mem_length = (opponent.cooperations * 20) // len(opponent.history) + self.mem_length = (opponent.cooperations * 20) // len( + opponent.history + ) if self.mem_length == 0: self.mem_length += 1 self.grudged = True @@ -131,6 +135,7 @@ class LevelPunisher(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(opponent.history) < 10: return C elif opponent.defections / len(opponent.history) > 0.2: @@ -161,6 +166,7 @@ class TrickyLevelPunisher(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(opponent.history) == 0: return C if len(opponent.history) < 10: diff --git a/axelrod/strategies/qlearner.py b/axelrod/strategies/qlearner.py index ef2819e01..1a81ae227 100644 --- a/axelrod/strategies/qlearner.py +++ b/axelrod/strategies/qlearner.py @@ -65,7 +65,9 @@ def strategy(self, opponent: Player) -> Action: if state not in self.Qs: self.Qs[state] = OrderedDict(zip([C, D], [0, 0])) self.Vs[state] = 0 - self.perform_q_learning(self.prev_state, state, self.prev_action, reward) + self.perform_q_learning( + self.prev_state, state, self.prev_action, reward + ) action = self.select_action(state) self.prev_state = state self.prev_action = action @@ -90,16 +92,22 @@ def find_state(self, opponent: Player) 
-> str: action_str = actions_to_str(opponent.history[-self.memory_length :]) return action_str + prob - def perform_q_learning(self, prev_state: str, state: str, action: Action, reward): + def perform_q_learning( + self, prev_state: str, state: str, action: Action, reward + ): """ Performs the qlearning algorithm """ - self.Qs[prev_state][action] = (1.0 - self.learning_rate) * self.Qs[prev_state][ - action - ] + self.learning_rate * (reward + self.discount_rate * self.Vs[state]) + self.Qs[prev_state][action] = (1.0 - self.learning_rate) * self.Qs[ + prev_state + ][action] + self.learning_rate * ( + reward + self.discount_rate * self.Vs[state] + ) self.Vs[prev_state] = max(self.Qs[prev_state].values()) - def find_reward(self, opponent: Player) -> Dict[Action, Dict[Action, Score]]: + def find_reward( + self, opponent: Player + ) -> Dict[Action, Dict[Action, Score]]: """ Finds the reward gained on the last iteration """ diff --git a/axelrod/strategies/rand.py b/axelrod/strategies/rand.py index 3987ac344..55791aa98 100644 --- a/axelrod/strategies/rand.py +++ b/axelrod/strategies/rand.py @@ -41,6 +41,7 @@ def __init__(self, p: float = 0.5) -> None: self.p = p def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" return self._random.random_choice(self.p) def _post_init(self): diff --git a/axelrod/strategies/resurrection.py b/axelrod/strategies/resurrection.py index 76a61a355..a646751b1 100644 --- a/axelrod/strategies/resurrection.py +++ b/axelrod/strategies/resurrection.py @@ -29,6 +29,7 @@ class Resurrection(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(self.history) == 0: return C if len(self.history) >= 5 and self.history[-5:] == [D, D, D, D, D]: @@ -61,6 +62,7 @@ class DoubleResurrection(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(self.history) == 0: return C if len(self.history) >= 5 and self.history[-5:] == [C, C, C, C, C]: diff --git a/axelrod/strategies/revised_downing.py b/axelrod/strategies/revised_downing.py index a9ea057cd..6367f206b 100644 --- a/axelrod/strategies/revised_downing.py +++ b/axelrod/strategies/revised_downing.py @@ -45,6 +45,7 @@ def __init__(self) -> None: self.total_D = 0 # note the same as self.defections def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" round_number = len(self.history) + 1 if round_number == 1: @@ -67,7 +68,7 @@ def strategy(self, opponent: Player) -> Action: alt = 4.0 * self.good - 5.0 * self.bad - 1 if c >= 0 and c >= alt: move = C - elif (c >= 0 and c < alt) or (alt >= 0): + elif (0 <= c < alt) or (alt >= 0): move = self.history[-1].flip() else: move = D diff --git a/axelrod/strategies/selfsteem.py b/axelrod/strategies/selfsteem.py index 94131659f..9b7ea77e6 100644 --- a/axelrod/strategies/selfsteem.py +++ b/axelrod/strategies/selfsteem.py @@ -36,16 +36,17 @@ class SelfSteem(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" turns_number = len(self.history) sine_value = sin(2 * pi * turns_number / 10) if sine_value > 0.95: return D - if abs(sine_value) < 0.95 and abs(sine_value) > 0.3: + if 0.95 > abs(sine_value) > 0.3: return opponent.history[-1] - if sine_value < 0.3 and sine_value > -0.3: + if 0.3 > sine_value > -0.3: return self._random.random_choice() return C diff 
--git a/axelrod/strategies/sequence_player.py b/axelrod/strategies/sequence_player.py index 58c7b6d3f..061a7f0d9 100644 --- a/axelrod/strategies/sequence_player.py +++ b/axelrod/strategies/sequence_player.py @@ -34,6 +34,7 @@ def meta_strategy(value: int) -> Action: return C def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # Iterate through the sequence and apply the meta strategy for s in self.sequence_generator: return self.meta_strategy(s) @@ -80,7 +81,7 @@ def __init__(self) -> None: class ThueMorseInverse(ThueMorse): - """ A player who plays the inverse of the Thue-Morse sequence. + """A player who plays the inverse of the Thue-Morse sequence. Names: diff --git a/axelrod/strategies/shortmem.py b/axelrod/strategies/shortmem.py index f110f56b8..5420d75ad 100644 --- a/axelrod/strategies/shortmem.py +++ b/axelrod/strategies/shortmem.py @@ -22,7 +22,7 @@ class ShortMem(Player): name = "ShortMem" classifier = { - "memory_depth": float('inf'), + "memory_depth": float("inf"), "stochastic": False, "long_run_time": False, "inspects_source": False, @@ -32,6 +32,7 @@ class ShortMem(Player): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(opponent.history) <= 10: return C diff --git a/axelrod/strategies/stalker.py b/axelrod/strategies/stalker.py index 2eced3e89..087d4c7ec 100644 --- a/axelrod/strategies/stalker.py +++ b/axelrod/strategies/stalker.py @@ -55,6 +55,7 @@ def score_last_round(self, opponent: Player): self.current_score += scores[0] def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(self.history) == 0: return C diff --git a/axelrod/strategies/titfortat.py b/axelrod/strategies/titfortat.py index 31c20b639..787794a09 100644 --- a/axelrod/strategies/titfortat.py +++ b/axelrod/strategies/titfortat.py @@ -1,6 +1,9 @@ from axelrod.action import Action, actions_to_str from axelrod.player import Player -from axelrod.strategy_transformers import FinalTransformer, TrackHistoryTransformer +from axelrod.strategy_transformers import ( + FinalTransformer, + TrackHistoryTransformer, +) C, D = Action.C, Action.D @@ -71,6 +74,7 @@ class TitFor2Tats(Player): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" return D if opponent.history[-2:] == [D, D] else C @@ -95,6 +99,7 @@ class TwoTitsForTat(Player): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" return D if D in opponent.history[-2:] else C @@ -122,13 +127,16 @@ class DynamicTwoTitsForTat(Player): } def strategy(self, opponent): + """Actual strategy definition that determines player's action.""" # First move if not opponent.history: # Make sure we cooperate first turn return C if D in opponent.history[-2:]: # Probability of cooperating regardless - return self._random.random_choice(opponent.cooperations / len(opponent.history)) + return self._random.random_choice( + opponent.cooperations / len(opponent.history) + ) else: return C @@ -158,6 +166,7 @@ class Bully(Player): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" return C if opponent.history[-1:] == [D] else D @@ -180,6 +189,7 @@ class SneakyTitForTat(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines 
player's action.""" if len(self.history) < 2: return C if D not in opponent.history: @@ -210,6 +220,7 @@ class SuspiciousTitForTat(Player): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" return C if opponent.history[-1:] == [C] else D @@ -235,6 +246,7 @@ class AntiTitForTat(Player): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" return D if opponent.history[-1:] == [C] else C @@ -258,6 +270,7 @@ class HardTitForTat(Player): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # Cooperate on the first move if not opponent.history: return C @@ -289,6 +302,7 @@ class HardTitFor2Tats(Player): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # Cooperate on the first move if not opponent.history: return C @@ -302,13 +316,13 @@ def strategy(opponent: Player) -> Action: class OmegaTFT(Player): """OmegaTFT modifies Tit For Tat in two ways: - - checks for deadlock loops of alternating rounds of (C, D) and (D, C), - and attempting to break them - - uses a more sophisticated retaliation mechanism that is noise tolerant + - checks for deadlock loops of alternating rounds of (C, D) and (D, C), + and attempting to break them + - uses a more sophisticated retaliation mechanism that is noise tolerant - Names: + Names: - - OmegaTFT: [Slany2007]_ + - OmegaTFT: [Slany2007]_ """ name = "Omega TFT" @@ -331,6 +345,7 @@ def __init__( self.deadlock_counter = 0 def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # Cooperate on the first move if not self.history: return C @@ -389,7 +404,7 @@ class OriginalGradual(Player): Names: - Gradual: [Beaufils1997]_ - """ + """ name = "Original Gradual" classifier = { @@ -410,6 +425,7 @@ def __init__(self) -> None: self.punishment_limit = 0 def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if self.calming: self.calming = False @@ -475,6 +491,7 @@ def __init__(self) -> None: self.punish_count = 0 def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(self.history) == 0: return C @@ -523,6 +540,7 @@ def __init__(self): self._recorded_history = [] def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not opponent.history: return C @@ -590,6 +608,7 @@ def __init__(self, rate: float = 0.5) -> None: self.world = rate def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if len(opponent.history) == 0: return C @@ -631,6 +650,7 @@ def __init__(self) -> None: self.retaliating = False def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # First move if not self.history: return C @@ -668,6 +688,7 @@ class SlowTitForTwoTats2(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # Start with two cooperations if len(self.history) < 2: @@ -702,6 +723,7 @@ class Alexei(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not self.history: return C if opponent.history[-1] 
== D: @@ -736,9 +758,10 @@ def __init__(self): self.is_defector = False def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not self.history: return C - if not (self.is_defector) and opponent.defections >= 5: + if not self.is_defector and opponent.defections >= 5: self.is_defector = True if self.is_defector: return D @@ -791,6 +814,7 @@ def __init__(self, N: int = 3, M: int = 2) -> None: self.retaliate_count = 0 def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" # if opponent defected consecutively M times, start the retaliation if not self.M or opponent.history[-self.M :].count(D) == self.M: self.retaliate_count = self.N @@ -829,6 +853,7 @@ def __init__(self): self.is_defector = False def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" if not self.history: return C if self.is_defector: @@ -871,7 +896,7 @@ def __init__(self, p: float = 0.5) -> None: """ Parameters ---------- - p, float + p: float The probability to cooperate """ super().__init__() diff --git a/axelrod/strategies/verybad.py b/axelrod/strategies/verybad.py index 02f07efb6..747d15d61 100644 --- a/axelrod/strategies/verybad.py +++ b/axelrod/strategies/verybad.py @@ -32,6 +32,7 @@ class VeryBad(Player): @staticmethod def strategy(opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" total_moves = len(opponent.history) if total_moves < 3: diff --git a/axelrod/strategies/worse_and_worse.py b/axelrod/strategies/worse_and_worse.py index efa068a79..83173ea30 100644 --- a/axelrod/strategies/worse_and_worse.py +++ b/axelrod/strategies/worse_and_worse.py @@ -27,6 +27,7 @@ class WorseAndWorse(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" current_round = len(self.history) + 1 probability = 1 - current_round / 1000 return self._random.random_choice(probability) @@ -52,6 +53,7 @@ class KnowledgeableWorseAndWorse(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" current_round = len(self.history) + 1 expected_length = self.match_attributes["length"] probability = 1 - current_round / expected_length @@ -79,6 +81,7 @@ class WorseAndWorse2(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" current_round = len(self.history) + 1 if current_round == 1: @@ -112,6 +115,7 @@ class WorseAndWorse3(Player): } def strategy(self, opponent: Player) -> Action: + """Actual strategy definition that determines player's action.""" current_round = len(self.history) + 1 if current_round == 1: diff --git a/axelrod/strategies/zero_determinant.py b/axelrod/strategies/zero_determinant.py index 7b849eadb..89852ae20 100644 --- a/axelrod/strategies/zero_determinant.py +++ b/axelrod/strategies/zero_determinant.py @@ -151,7 +151,9 @@ class ZDExtort2v2(LRPlayer): name = "ZD-Extort-2 v2" - def __init__(self, phi: float = 1 / 8, s: float = 0.5, l: float = 1) -> None: + def __init__( + self, phi: float = 1 / 8, s: float = 0.5, l: float = 1 + ) -> None: super().__init__(phi, s, l) @@ -168,7 +170,9 @@ class ZDExtort3(LRPlayer): name = "ZD-Extort3" - def __init__(self, phi: float = 3 / 26, s: float = 1 / 3, l: float = 1) -> None: + def __init__( + self, phi: float = 3 / 26, s: float = 1 / 3, l: float = 1 + ) -> 
None: super().__init__(phi, s, l) @@ -185,7 +189,9 @@ class ZDExtort4(LRPlayer): name = "ZD-Extort-4" - def __init__(self, phi: float = 4 / 17, s: float = 0.25, l: float = 1) -> None: + def __init__( + self, phi: float = 4 / 17, s: float = 0.25, l: float = 1 + ) -> None: super().__init__(phi, s, l) @@ -200,7 +206,9 @@ class ZDGen2(LRPlayer): name = "ZD-GEN-2" - def __init__(self, phi: float = 1 / 8, s: float = 0.5, l: float = 3) -> None: + def __init__( + self, phi: float = 1 / 8, s: float = 0.5, l: float = 3 + ) -> None: super().__init__(phi, s, l) @@ -251,5 +259,7 @@ class ZDSet2(LRPlayer): name = "ZD-SET-2" - def __init__(self, phi: float = 1 / 4, s: float = 0.0, l: float = 2) -> None: + def __init__( + self, phi: float = 1 / 4, s: float = 0.0, l: float = 2 + ) -> None: super().__init__(phi, s, l) diff --git a/axelrod/strategy_transformers.py b/axelrod/strategy_transformers.py index a7297382d..70773ff60 100644 --- a/axelrod/strategy_transformers.py +++ b/axelrod/strategy_transformers.py @@ -107,20 +107,30 @@ def __call__(self, PlayerClass): reclassifiers = PlayerClass._reclassifiers.copy() if reclassifier is not None: - reclassifiers.append((makes_use_of_reclassifier, (PlayerClass, strategy_wrapper), {})) + reclassifiers.append( + ( + makes_use_of_reclassifier, + (PlayerClass, strategy_wrapper), + {}, + ) + ) # This one is second on the assumption that the wrapper reclassifier knows best. reclassifiers.append((reclassifier, args, kwargs)) # First handle the case where the strategy method is static. if is_strategy_static(PlayerClass): + def inner_strategy(self, opponent): return PlayerClass.strategy(opponent) + else: + def inner_strategy(self, opponent): return PlayerClass.strategy(self, opponent) # For the dual wrapper, we flip the history before and after the transform. if strategy_wrapper == dual_wrapper: + def dual_inner_strategy(self, opponent): """The dual wrapper requires flipping the history. It may be more efficient to use a custom History class that tracks a flipped history and swaps labels.""" @@ -128,16 +138,19 @@ def dual_inner_strategy(self, opponent): proposed_action = inner_strategy(self, opponent) self._history = self.history.flip_plays() return proposed_action + outer_strategy = dual_inner_strategy # For the JossAnn transformer, we want to avoid calling the wrapped strategy, # in the cases where it is unnecessary, to avoid affecting stochasticity. 
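# A rough sketch of why the shortcut below is safe, assuming the usual
# JossAnn((x, y)) semantics -- play C with probability x, D with probability
# y, and only with the remaining probability 1 - x - y fall back to the
# wrapped strategy (illustrative names only, not the wrapper's actual code):
#
#     options = [C, D, proposed_action]
#     probs = [x, y, max(0, 1 - x - y)]
#     action = player._random.choice(options, p=probs)
#
# In the non-stochastic cases, e.g. (x, y) = (1, 0) or (0, 1), no probability
# mass is left for proposed_action, so a fixed placeholder such as C can stand
# in for it without calling the wrapped strategy or consuming random draws.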
elif strategy_wrapper == joss_ann_wrapper: + def joss_ann_inner_strategy(self, opponent): if not self.classifier["stochastic"]: proposed_action = C else: proposed_action = inner_strategy(self, opponent) return proposed_action + outer_strategy = joss_ann_inner_strategy else: outer_strategy = inner_strategy @@ -727,13 +740,15 @@ def __call__(self, player, opponent, action, retaliations): def retailiation_reclassifier(original_classifier, retaliations): if retaliations > 0: original_classifier["memory_depth"] = max( - retaliations, original_classifier["memory_depth"]) + retaliations, original_classifier["memory_depth"] + ) return original_classifier RetaliationTransformer = StrategyTransformerFactory( - RetaliationWrapper(), name_prefix="Retaliating", - reclassifier=retailiation_reclassifier + RetaliationWrapper(), + name_prefix="Retaliating", + reclassifier=retailiation_reclassifier, ) @@ -751,11 +766,13 @@ def __call__(self, player, opponent, action): def rua_reclassifier(original_classifier): original_classifier["memory_depth"] = max( - 1, original_classifier["memory_depth"]) + 1, original_classifier["memory_depth"] + ) return original_classifier RetaliateUntilApologyTransformer = StrategyTransformerFactory( - RetaliationUntilApologyWrapper(), name_prefix="RUA", - reclassifier=rua_reclassifier + RetaliationUntilApologyWrapper(), + name_prefix="RUA", + reclassifier=rua_reclassifier, ) diff --git a/axelrod/tests/integration/test_filtering.py b/axelrod/tests/integration/test_filtering.py index 43226caf6..e8d99218e 100644 --- a/axelrod/tests/integration/test_filtering.py +++ b/axelrod/tests/integration/test_filtering.py @@ -4,7 +4,7 @@ import axelrod as axl from axelrod.tests.property import strategy_lists from hypothesis import example, given, settings -from hypothesis.strategies import integers +from hypothesis.strategies import data, integers, lists, sampled_from class TestFiltersAgainstComprehensions(unittest.TestCase): @@ -21,11 +21,13 @@ def tearDown(self) -> None: warnings.simplefilter("default", category=UserWarning) @settings(deadline=None) - @given(strategies=strategy_lists(min_size=20, max_size=20)) - @example(strategies=[axl.DBS, axl.Cooperator]) - def test_boolean_filtering(self, strategies): + @given( + strategies=strategy_lists(min_size=20, max_size=20), + hypothesis_selector=data(), + ) + def test_boolean_filtering(self, strategies, hypothesis_selector): - classifiers = [ + classifier_list = [ "stochastic", "long_run_time", "manipulates_state", @@ -33,10 +35,24 @@ def test_boolean_filtering(self, strategies): "inspects_source", ] + classifiers = hypothesis_selector.draw( + lists( + sampled_from(classifier_list), + min_size=1, + max_size=len(classifier_list), + unique=True, + ) + ) + + comprehension, filterset = strategies, {} for classifier in classifiers: - comprehension = set(filter(axl.Classifiers[classifier], strategies)) - filterset = {classifier: True} - filtered = set(axl.filtered_strategies(filterset, strategies=strategies)) + comprehension = set( + filter(axl.Classifiers[classifier], strategies) + ) & set(comprehension) + filterset[classifier] = True + filtered = set( + axl.filtered_strategies(filterset, strategies=strategies) + ) self.assertEqual(comprehension, filtered) @given( @@ -90,7 +106,9 @@ def test_memory_depth_filtering( ] ) filterset = {"memory_depth": memory_depth} - filtered = set(axl.filtered_strategies(filterset, strategies=strategies)) + filtered = set( + axl.filtered_strategies(filterset, strategies=strategies) + ) self.assertEqual(comprehension, 
filtered) @given(strategies=strategy_lists(min_size=20, max_size=20)) @@ -106,12 +124,16 @@ def test_makes_use_of_filtering(self, strategies): [ s for s in strategies - if set(classifier).issubset(set(axl.Classifiers["makes_use_of"](s))) + if set(classifier).issubset( + set(axl.Classifiers["makes_use_of"](s)) + ) ] ) filterset = {"makes_use_of": classifier} - filtered = set(axl.filtered_strategies(filterset, strategies=strategies)) + filtered = set( + axl.filtered_strategies(filterset, strategies=strategies) + ) self.assertEqual( comprehension, filtered, msg="classifier: {}".format(classifier) diff --git a/axelrod/tests/integration/test_matches.py b/axelrod/tests/integration/test_matches.py index d46ded024..46f4e318f 100644 --- a/axelrod/tests/integration/test_matches.py +++ b/axelrod/tests/integration/test_matches.py @@ -9,10 +9,14 @@ C, D = axl.Action.C, axl.Action.D deterministic_strategies = [ - s for s in axl.short_run_time_strategies if not axl.Classifiers["stochastic"](s()) + s + for s in axl.short_run_time_strategies + if not axl.Classifiers["stochastic"](s()) ] stochastic_strategies = [ - s for s in axl.short_run_time_strategies if axl.Classifiers["stochastic"](s()) + s + for s in axl.short_run_time_strategies + if axl.Classifiers["stochastic"](s()) ] diff --git a/axelrod/tests/integration/test_names.py b/axelrod/tests/integration/test_names.py index 56061b959..ad4ff85af 100644 --- a/axelrod/tests/integration/test_names.py +++ b/axelrod/tests/integration/test_names.py @@ -5,7 +5,7 @@ class TestNames(unittest.TestCase): def test_all_strategies_have_names(self): - names = [s.name for s in axl.all_strategies if s.name != 'Player'] + names = [s.name for s in axl.all_strategies if s.name != "Player"] self.assertEqual(len(names), len(axl.all_strategies)) def test_all_names_are_unique(self): diff --git a/axelrod/tests/integration/test_tournament.py b/axelrod/tests/integration/test_tournament.py index d2d26cf56..8568e165d 100644 --- a/axelrod/tests/integration/test_tournament.py +++ b/axelrod/tests/integration/test_tournament.py @@ -51,7 +51,9 @@ def test_big_tournaments(self, tournament): path = pathlib.Path("test_outputs/test_tournament.csv") filename = axl_filename(path) self.assertIsNone( - tournament.play(progress_bar=False, filename=filename, build_results=False) + tournament.play( + progress_bar=False, filename=filename, build_results=False + ) ) def test_serial_play(self): @@ -94,9 +96,13 @@ def test_repeat_tournament_deterministic(self): turns=2, repetitions=2, ) - path = pathlib.Path("test_outputs/stochastic_tournament_{}.csv".format(_)) + path = pathlib.Path( + "test_outputs/stochastic_tournament_{}.csv".format(_) + ) files.append(axl_filename(path)) - tournament.play(progress_bar=False, filename=files[-1], build_results=False) + tournament.play( + progress_bar=False, filename=files[-1], build_results=False + ) self.assertTrue(filecmp.cmp(files[0], files[1])) def test_repeat_tournament_stochastic(self): @@ -116,11 +122,15 @@ def test_repeat_tournament_stochastic(self): game=self.game, turns=2, repetitions=2, - seed=17 + seed=17, + ) + path = pathlib.Path( + "test_outputs/stochastic_tournament_{}.csv".format(_) ) - path = pathlib.Path("test_outputs/stochastic_tournament_{}.csv".format(_)) files.append(axl_filename(path)) - tournament.play(progress_bar=False, filename=files[-1], build_results=False) + tournament.play( + progress_bar=False, filename=files[-1], build_results=False + ) self.assertTrue(filecmp.cmp(files[0], files[1])) @@ -162,7 +172,9 @@ def 
test_matches_have_different_length(self): p2 = axl.Cooperator() p3 = axl.Cooperator() players = [p1, p2, p3] - tournament = axl.Tournament(players, prob_end=0.5, repetitions=2, seed=3) + tournament = axl.Tournament( + players, prob_end=0.5, repetitions=2, seed=3 + ) results = tournament.play(progress_bar=False) # Check that match length are different across the repetitions self.assertNotEqual(results.match_lengths[0], results.match_lengths[1]) diff --git a/axelrod/tests/property.py b/axelrod/tests/property.py index c3dc009cc..3b674c71c 100644 --- a/axelrod/tests/property.py +++ b/axelrod/tests/property.py @@ -4,12 +4,21 @@ import itertools import axelrod as axl -from hypothesis.strategies import composite, floats, integers, lists, sampled_from +from hypothesis.strategies import ( + composite, + floats, + integers, + lists, + sampled_from, +) @composite def strategy_lists( - draw, strategies=axl.short_run_time_strategies, min_size=1, max_size=len(axl.strategies) + draw, + strategies=axl.short_run_time_strategies, + min_size=1, + max_size=len(axl.strategies), ): """ A hypothesis decorator to return a list of strategies @@ -106,14 +115,20 @@ def tournaments( The maximum number of repetitions """ strategies = draw( - strategy_lists(strategies=strategies, min_size=min_size, max_size=max_size) + strategy_lists( + strategies=strategies, min_size=min_size, max_size=max_size + ) ) players = [s() for s in strategies] turns = draw(integers(min_value=min_turns, max_value=max_turns)) - repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) + repetitions = draw( + integers(min_value=min_repetitions, max_value=max_repetitions) + ) noise = draw(floats(min_value=min_noise, max_value=max_noise)) - tournament = axl.Tournament(players, turns=turns, repetitions=repetitions, noise=noise) + tournament = axl.Tournament( + players, turns=turns, repetitions=repetitions, noise=noise + ) return tournament @@ -129,7 +144,7 @@ def prob_end_tournaments( max_noise=1, min_repetitions=1, max_repetitions=20, - seed=None + seed=None, ): """ A hypothesis decorator to return a tournament, @@ -158,15 +173,23 @@ def prob_end_tournaments( Random seed """ strategies = draw( - strategy_lists(strategies=strategies, min_size=min_size, max_size=max_size) + strategy_lists( + strategies=strategies, min_size=min_size, max_size=max_size + ) ) players = [s() for s in strategies] prob_end = draw(floats(min_value=min_prob_end, max_value=max_prob_end)) - repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) + repetitions = draw( + integers(min_value=min_repetitions, max_value=max_repetitions) + ) noise = draw(floats(min_value=min_noise, max_value=max_noise)) tournament = axl.Tournament( - players, prob_end=prob_end, repetitions=repetitions, noise=noise, seed=seed + players, + prob_end=prob_end, + repetitions=repetitions, + noise=noise, + seed=seed, ) return tournament @@ -209,7 +232,9 @@ def spatial_tournaments( The maximum number of repetitions """ strategies = draw( - strategy_lists(strategies=strategies, min_size=min_size, max_size=max_size) + strategy_lists( + strategies=strategies, min_size=min_size, max_size=max_size + ) ) players = [s() for s in strategies] player_indices = list(range(len(players))) @@ -225,13 +250,17 @@ def spatial_tournaments( # Ensure all players/nodes are connected: node_indices = sorted(set([node for edge in edges for node in edge])) - missing_nodes = [index for index in player_indices if index not in node_indices] + missing_nodes = [ + index for index in 
player_indices if index not in node_indices + ] for index in missing_nodes: opponent = draw(sampled_from(player_indices)) edges.append((index, opponent)) turns = draw(integers(min_value=min_turns, max_value=max_turns)) - repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) + repetitions = draw( + integers(min_value=min_repetitions, max_value=max_repetitions) + ) noise = draw(floats(min_value=min_noise, max_value=max_noise)) tournament = axl.Tournament( @@ -278,7 +307,9 @@ def prob_end_spatial_tournaments( The maximum number of repetitions """ strategies = draw( - strategy_lists(strategies=strategies, min_size=min_size, max_size=max_size) + strategy_lists( + strategies=strategies, min_size=min_size, max_size=max_size + ) ) players = [s() for s in strategies] player_indices = list(range(len(players))) @@ -294,17 +325,25 @@ def prob_end_spatial_tournaments( # Ensure all players/nodes are connected: node_indices = sorted(set([node for edge in edges for node in edge])) - missing_nodes = [index for index in player_indices if index not in node_indices] + missing_nodes = [ + index for index in player_indices if index not in node_indices + ] for index in missing_nodes: opponent = draw(sampled_from(player_indices)) edges.append((index, opponent)) prob_end = draw(floats(min_value=min_prob_end, max_value=max_prob_end)) - repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) + repetitions = draw( + integers(min_value=min_repetitions, max_value=max_repetitions) + ) noise = draw(floats(min_value=min_noise, max_value=max_noise)) tournament = axl.Tournament( - players, prob_end=prob_end, repetitions=repetitions, noise=noise, edges=edges + players, + prob_end=prob_end, + repetitions=repetitions, + noise=noise, + edges=edges, ) return tournament diff --git a/axelrod/tests/strategies/test_adaptive.py b/axelrod/tests/strategies/test_adaptive.py index 20befc067..8ac1df998 100644 --- a/axelrod/tests/strategies/test_adaptive.py +++ b/axelrod/tests/strategies/test_adaptive.py @@ -14,7 +14,7 @@ class TestAdaptive(TestPlayer): expected_classifier = { "memory_depth": float("inf"), "stochastic": False, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -54,7 +54,9 @@ def test_scoring_with_default_game(self): opponent = axl.Cooperator() attrs = {"scores": {C: 3, D: 0}} expected_actions = list(zip([C, C], [C, C])) - self.versus_test(opponent, expected_actions, turns=2, attrs=attrs, seed=9) + self.versus_test( + opponent, expected_actions, turns=2, attrs=attrs, seed=9 + ) def test_scoring_with_alternate_game(self): """Tests that the alternate game is used in scoring.""" @@ -63,5 +65,11 @@ def test_scoring_with_alternate_game(self): expected_actions = list(zip([C, C, C], [C, D, C])) attrs = {"scores": {C: 7, D: 0}} match_attributes = {"game": axl.Game(-3, 10, 10, 10)} - self.versus_test(opponent, expected_actions, turns=3, attrs=attrs, seed=9, - match_attributes=match_attributes) + self.versus_test( + opponent, + expected_actions, + turns=3, + attrs=attrs, + seed=9, + match_attributes=match_attributes, + ) diff --git a/axelrod/tests/strategies/test_ann.py b/axelrod/tests/strategies/test_ann.py index 985462e34..c2a8290fe 100644 --- a/axelrod/tests/strategies/test_ann.py +++ b/axelrod/tests/strategies/test_ann.py @@ -18,6 +18,7 @@ class TestSplitWeights(unittest.TestCase): def test_split_weights(self): with self.assertRaises(ValueError): split_weights([0] * 20, 12, 10) + # 
Doesn't Raise split_weights([0] * 70, 5, 10) split_weights([0] * 12, 10, 1) @@ -130,7 +131,7 @@ class TestEvolvableANN3(TestEvolvablePlayer): init_parameters = { "num_features": nn_weights["Evolved ANN 5"][0], "num_hidden": nn_weights["Evolved ANN 5"][1], - "weights": nn_weights["Evolved ANN 5"][2] + "weights": nn_weights["Evolved ANN 5"][2], } @@ -139,7 +140,7 @@ class TestEvolvableANN3(TestEvolvablePlayer): axl.EvolvableANN, num_features=num_features, num_hidden=num_hidden, - weights=weights + weights=weights, ) diff --git a/axelrod/tests/strategies/test_apavlov.py b/axelrod/tests/strategies/test_apavlov.py index 8d3288a93..3b37f1278 100644 --- a/axelrod/tests/strategies/test_apavlov.py +++ b/axelrod/tests/strategies/test_apavlov.py @@ -34,7 +34,9 @@ def test_strategy_versus_mock_player(self): opponent = axl.MockPlayer(actions=[C] * 6 + [D]) actions = [(C, C)] * 6 + [(C, D), (D, C)] self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "Cooperative"} + opponent, + expected_actions=actions, + attrs={"opponent_class": "Cooperative"}, ) def test_strategy_versus_defector(self): @@ -70,30 +72,47 @@ def test_strategy_PavlovD(self): opponent = axl.Cycler(cycle="DDC") actions = [(C, D), (D, D), (D, C), (C, D), (D, D), (D, C), (D, D)] self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "PavlovD"} + opponent, + expected_actions=actions, + attrs={"opponent_class": "PavlovD"}, ) def test_strategy_PavlovD2(self): """Tests that PavlovD is identified by DDCDDC and that the response is D then C""" opponent = axl.MockPlayer(actions=[D, D, C, D, D, C, D]) - actions = [(C, D), (D, D), (D, C), (C, D), (D, D), (D, C), (D, D), (C, D)] + actions = [ + (C, D), + (D, D), + (D, C), + (C, D), + (D, D), + (D, C), + (D, D), + (C, D), + ] self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "PavlovD"} + opponent, + expected_actions=actions, + attrs={"opponent_class": "PavlovD"}, ) def test_strategy_random(self): opponent = axl.MockPlayer(actions=[C, C, C, D, D, D]) actions = [(C, C), (C, C), (C, C), (C, D), (D, D), (D, D), (D, C)] self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "Random"} + opponent, + expected_actions=actions, + attrs={"opponent_class": "Random"}, ) def test_strategy_random2(self): opponent = axl.MockPlayer(actions=[D, D, D, C, C, C]) actions = [(C, D), (D, D), (D, D), (D, C), (C, C), (C, C), (D, D)] self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "Random"} + opponent, + expected_actions=actions, + attrs={"opponent_class": "Random"}, ) @@ -143,42 +162,100 @@ def test_strategy_defector3(self): def test_strategy_defector4(self): opponent = axl.MockPlayer(actions=[C, D, D, C, D, D, D]) - actions = [(C, C), (C, D), (D, D), (D, C), (C, D), (D, D), (D, D), (D, C)] + actions = [ + (C, C), + (C, D), + (D, D), + (D, C), + (C, D), + (D, D), + (D, D), + (D, C), + ] self.versus_test( opponent, expected_actions=actions, attrs={"opponent_class": "ALLD"} ) def test_strategy_stft(self): opponent = axl.MockPlayer(actions=[C, D, D, C, C, D, D]) - actions = [(C, C), (C, D), (D, D), (D, C), (C, C), (C, D), (C, D), (D, C)] + actions = [ + (C, C), + (C, D), + (D, D), + (D, C), + (C, C), + (C, D), + (C, D), + (D, C), + ] self.versus_test( opponent, expected_actions=actions, attrs={"opponent_class": "STFT"} ) def test_strategy_stft2(self): opponent = axl.MockPlayer(actions=[C, D, C, D, C, D, D]) - actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (C, D), (D, C)] + 
actions = [ + (C, C), + (C, D), + (D, C), + (C, D), + (D, C), + (C, D), + (C, D), + (D, C), + ] self.versus_test( opponent, expected_actions=actions, attrs={"opponent_class": "STFT"} ) def test_strategy_stft3(self): opponent = axl.MockPlayer(actions=[D, D, D, C, C, C, C]) - actions = [(C, D), (D, D), (D, D), (D, C), (C, C), (C, C), (C, C), (C, D)] + actions = [ + (C, D), + (D, D), + (D, D), + (D, C), + (C, C), + (C, C), + (C, C), + (C, D), + ] self.versus_test( opponent, expected_actions=actions, attrs={"opponent_class": "STFT"} ) def test_strategy_random(self): opponent = axl.MockPlayer(actions=[C, C, C, C, D, D]) - actions = [(C, C), (C, C), (C, C), (C, C), (C, D), (D, D), (D, C), (D, C)] + actions = [ + (C, C), + (C, C), + (C, C), + (C, C), + (C, D), + (D, D), + (D, C), + (D, C), + ] self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "Random"} + opponent, + expected_actions=actions, + attrs={"opponent_class": "Random"}, ) def test_strategy_random2(self): opponent = axl.MockPlayer(actions=[D, D, C, C, C, C]) - actions = [(C, D), (D, D), (D, C), (C, C), (C, C), (C, C), (D, D), (D, D)] + actions = [ + (C, D), + (D, D), + (D, C), + (C, C), + (C, C), + (C, C), + (D, D), + (D, D), + ] self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "Random"} + opponent, + expected_actions=actions, + attrs={"opponent_class": "Random"}, ) diff --git a/axelrod/tests/strategies/test_axelrod_first.py b/axelrod/tests/strategies/test_axelrod_first.py index 40f09e80c..dfb1bd0b9 100644 --- a/axelrod/tests/strategies/test_axelrod_first.py +++ b/axelrod/tests/strategies/test_axelrod_first.py @@ -107,7 +107,9 @@ class TestFirstByFeld(TestPlayer): def test_cooperation_probability(self): # Test cooperation probabilities - p1 = self.player(start_coop_prob=1.0, end_coop_prob=0.8, rounds_of_decay=100) + p1 = self.player( + start_coop_prob=1.0, end_coop_prob=0.8, rounds_of_decay=100 + ) self.assertEqual(1.0, p1._cooperation_probability()) p2 = axl.Cooperator() match = axl.Match((p1, p2), turns=50) @@ -118,7 +120,9 @@ def test_cooperation_probability(self): self.assertEqual(0.8, p1._cooperation_probability()) # Test cooperation probabilities, second set of params - p1 = self.player(start_coop_prob=1.0, end_coop_prob=0.5, rounds_of_decay=200) + p1 = self.player( + start_coop_prob=1.0, end_coop_prob=0.5, rounds_of_decay=200 + ) self.assertEqual(1.0, p1._cooperation_probability()) match = axl.Match((p1, p2), turns=100) match.play() @@ -131,10 +135,14 @@ def test_decay(self): # Test beyond 200 rounds for opponent in [axl.Cooperator(), axl.Defector()]: player = self.player() - self.assertEqual(player._cooperation_probability(), player._start_coop_prob) + self.assertEqual( + player._cooperation_probability(), player._start_coop_prob + ) match = axl.Match((player, opponent), turns=201) match.play() - self.assertEqual(player._cooperation_probability(), player._end_coop_prob) + self.assertEqual( + player._cooperation_probability(), player._end_coop_prob + ) def test_stochastic_behavior(self): actions = [(C, C)] * 13 + [(D, C)] @@ -176,19 +184,28 @@ def test_strategy(self): actions += [(D, C)] # 51 turns actions += [(C, D), (D, C)] * 2 + [(C, D)] # 56 turns self.versus_test( - axl.Alternator(), expected_actions=actions, attrs=expected_attrs, seed=0 + axl.Alternator(), + expected_actions=actions, + attrs=expected_attrs, + seed=0, ) # Against defector actions = [(C, D)] + [(D, D)] * 55 # 56 turns self.versus_test( - axl.Defector(), expected_actions=actions, 
attrs=expected_attrs, seed=0 + axl.Defector(), + expected_actions=actions, + attrs=expected_attrs, + seed=0, ) # Against cooperator actions = [(C, C)] * 50 + [(D, C)] + [(C, C)] * 5 self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs=expected_attrs, seed=0 + axl.Cooperator(), + expected_actions=actions, + attrs=expected_attrs, + seed=0, ) # Test recognition of random player @@ -198,12 +215,21 @@ def test_strategy(self): } actions = [(C, C)] * 50 + [(D, C)] + [(C, C)] * 5 # 56 turns self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs=expected_attrs, seed=1 + axl.Cooperator(), + expected_actions=actions, + attrs=expected_attrs, + seed=1, ) - expected_attrs = {"opponent_is_random": False, "next_random_defection_turn": 63} + expected_attrs = { + "opponent_is_random": False, + "next_random_defection_turn": 63, + } actions += [(C, C)] # 57 turns self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs=expected_attrs, seed=8 + axl.Cooperator(), + expected_actions=actions, + attrs=expected_attrs, + seed=8, ) expected_attrs = { @@ -215,11 +241,17 @@ def test_strategy(self): actions += [(C, D), (D, C)] * 3 # 57 turns actions += [(D, D)] self.versus_test( - axl.Alternator(), expected_actions=actions, attrs=expected_attrs, seed=3 + axl.Alternator(), + expected_actions=actions, + attrs=expected_attrs, + seed=3, ) actions += [(D, C), (D, D)] * 5 self.versus_test( - axl.Alternator(), expected_actions=actions, attrs=expected_attrs, seed=4 + axl.Alternator(), + expected_actions=actions, + attrs=expected_attrs, + seed=4, ) # Test versus TfT @@ -231,20 +263,35 @@ def test_strategy(self): actions += [(C, D), (D, C)] * 3 # 56 turns actions += [(C, D), (D, C)] * 50 self.versus_test( - axl.TitForTat(), expected_actions=actions, seed=0, attrs=expected_attrs + axl.TitForTat(), + expected_actions=actions, + seed=0, + attrs=expected_attrs, ) # Test random defections - expected_attrs = {"opponent_is_random": False, "next_random_defection_turn": 76} + expected_attrs = { + "opponent_is_random": False, + "next_random_defection_turn": 76, + } actions = [(C, C)] * 50 + [(D, C)] + [(C, C)] * 15 + [(D, C)] + [(C, C)] self.versus_test( - axl.Cooperator(), expected_actions=actions, seed=0, attrs=expected_attrs + axl.Cooperator(), + expected_actions=actions, + seed=0, + attrs=expected_attrs, ) - expected_attrs = {"opponent_is_random": False, "next_random_defection_turn": 79} + expected_attrs = { + "opponent_is_random": False, + "next_random_defection_turn": 79, + } actions = [(C, C)] * 50 + [(D, C)] + [(C, C)] * 14 + [(D, C)] + [(C, C)] self.versus_test( - axl.Cooperator(), expected_actions=actions, seed=5, attrs=expected_attrs + axl.Cooperator(), + expected_actions=actions, + seed=5, + attrs=expected_attrs, ) @@ -270,11 +317,29 @@ def test_strategy(self): self.versus_test(axl.Alternator(), expected_actions=actions, seed=0) opponent = axl.MockPlayer(actions=[D] * 8) - actions = [(C, D), (C, D), (D, D), (C, D), (D, D), (C, D), (D, D), (C, D)] + actions = [ + (C, D), + (C, D), + (D, D), + (C, D), + (D, D), + (C, D), + (D, D), + (C, D), + ] self.versus_test(opponent, expected_actions=actions, seed=1) opponent = axl.MockPlayer(actions=[D] * 8) - actions = [(C, D), (D, D), (C, D), (D, D), (C, D), (D, D), (C, D), (C, D)] + actions = [ + (C, D), + (D, D), + (C, D), + (D, D), + (C, D), + (D, D), + (C, D), + (C, D), + ] self.versus_test(opponent, expected_actions=actions, seed=2) @@ -349,14 +414,41 @@ def test_strategy(self): actions = [(C, C)] * 9 self.versus_test(axl.Cooperator(), 
expected_actions=actions) - actions = [(C, D), (D, D), (D, D), (C, D), (C, D), (C, D), (C, D), (C, D)] + actions = [ + (C, D), + (D, D), + (D, D), + (C, D), + (C, D), + (C, D), + (C, D), + (C, D), + ] self.versus_test(axl.Defector(), expected_actions=actions) - actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (D, C), (C, D)] + actions = [ + (C, C), + (C, D), + (D, C), + (C, D), + (D, C), + (C, D), + (D, C), + (C, D), + ] self.versus_test(axl.Alternator(), expected_actions=actions) opponent = axl.MockPlayer(actions=[D, C]) - actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C), (D, D), (D, C)] + actions = [ + (C, D), + (D, C), + (D, D), + (D, C), + (D, D), + (D, C), + (D, D), + (D, C), + ] self.versus_test(opponent, expected_actions=actions) @@ -437,7 +529,9 @@ def test_strategy(self): # Test beyond 10 rounds opponent = axl.MockPlayer(actions=[D] * 5 + [C] * 5 + [C, D] * 5) - actions = [(C, D)] * 5 + [(C, C)] * 6 + [(D, D)] + [(D, C), (C, D), (C, C)] + actions = ( + [(C, D)] * 5 + [(C, C)] * 6 + [(D, D)] + [(D, C), (C, D), (C, C)] + ) self.versus_test(opponent, expected_actions=actions, seed=20) @@ -494,7 +588,9 @@ def test_strategy(self): opponent = axl.Cooperator() actions = [(C, C)] * 17 + [(D, C)] * 2 self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_is_random": False} + opponent, + expected_actions=actions, + attrs={"opponent_is_random": False}, ) actions = actions[:-2] + [(C, C)] * 2 @@ -510,7 +606,9 @@ def test_strategy(self): opponent = axl.Defector() actions = [(C, D)] * 4 + [(D, D)] * 15 self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_is_random": False} + opponent, + expected_actions=actions, + attrs={"opponent_is_random": False}, ) # SteinAndRapoport vs Alternator @@ -525,7 +623,9 @@ def test_strategy(self): actions += [(D, D), (D, C), (D, D), (D, C)] self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_is_random": True} + opponent, + expected_actions=actions, + attrs={"opponent_is_random": True}, ) # The test is carried out again every 15 rounds. @@ -570,8 +670,11 @@ def test_strategy(self): # Cooperator Test does not defect if game length is unknown opponent = axl.Cooperator() actions = [(C, C), (C, C), (C, C), (C, C)] - self.versus_test(opponent, expected_actions=actions, - match_attributes={"length": float("inf")}) + self.versus_test( + opponent, + expected_actions=actions, + match_attributes={"length": float("inf")}, + ) # Defector Test opponent = axl.Defector() diff --git a/axelrod/tests/strategies/test_axelrod_second.py b/axelrod/tests/strategies/test_axelrod_second.py index 26fa8078e..516ee1f3a 100644 --- a/axelrod/tests/strategies/test_axelrod_second.py +++ b/axelrod/tests/strategies/test_axelrod_second.py @@ -112,7 +112,9 @@ def test_strategy(self): # Now play TfT opponent = axl.MockPlayer(actions=[C, D, C, D, D, C]) actions = [(D, C), (C, D), (C, C), (C, D), (D, D), (D, C), (C, C)] - self.versus_test(opponent, expected_actions=actions, attrs={"is_TFT": True}) + self.versus_test( + opponent, expected_actions=actions, attrs={"is_TFT": True} + ) class TestGladstein(TestPlayer): @@ -151,11 +153,15 @@ def test_strategy(self): # Ratio is 1/3 when MockPlayer defected for the first time. 
opponent = axl.MockPlayer(actions=[C, C, C, D, D]) actions = [(D, C), (C, C), (C, C), (D, D), (C, D)] - self.versus_test(opponent, expected_actions=actions, attrs={"patsy": False}) + self.versus_test( + opponent, expected_actions=actions, attrs={"patsy": False} + ) opponent = axl.AntiTitForTat() actions = [(D, C), (C, C), (C, D), (C, D), (D, D)] - self.versus_test(opponent, expected_actions=actions, attrs={"patsy": False}) + self.versus_test( + opponent, expected_actions=actions, attrs={"patsy": False} + ) class TestTranquilizer(TestPlayer): @@ -184,7 +190,16 @@ def test_initialised_variables(self): def test_strategy(self): opponent = axl.Bully() - actions = [(C, D), (D, D), (D, C), (C, C), (C, D), (D, D), (D, C), (C, C)] + actions = [ + (C, D), + (D, D), + (D, C), + (C, C), + (C, D), + (D, D), + (D, C), + (C, C), + ] expected_attrs = { "num_turns_after_good_defection": 0, "one_turn_after_good_defection_ratio": 5, @@ -192,7 +207,9 @@ def test_strategy(self): "one_turn_after_good_defection_ratio_count": 1, "two_turns_after_good_defection_ratio_count": 1, } - self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs) + self.versus_test( + opponent, expected_actions=actions, attrs=expected_attrs + ) # Tests whether TitForTat is played given score is below 1.75 @@ -205,10 +222,14 @@ def test_strategy(self): "one_turn_after_good_defection_ratio_count": 1, "two_turns_after_good_defection_ratio_count": 1, } - self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs) + self.versus_test( + opponent, expected_actions=actions, attrs=expected_attrs + ) opponent = axl.MockPlayer([C] * 2 + [D] * 8 + [C] * 4) - actions = [(C, C), (C, C)] + [(C, D)] + [(D, D)] * 7 + [(D, C)] + [(C, C)] * 3 + actions = ( + [(C, C), (C, C)] + [(C, D)] + [(D, D)] * 7 + [(D, C)] + [(C, C)] * 3 + ) expected_attrs = { "num_turns_after_good_defection": 0, "one_turn_after_good_defection_ratio": 5, @@ -216,7 +237,9 @@ def test_strategy(self): "one_turn_after_good_defection_ratio_count": 1, "two_turns_after_good_defection_ratio_count": 1, } - self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs) + self.versus_test( + opponent, expected_actions=actions, attrs=expected_attrs + ) def test_strategy2(self): # If score is between 1.75 and 2.25, may cooperate or defect @@ -251,7 +274,7 @@ def test_strategy3(self): def test_strategy4(self): """If score is greater than 2.25 either cooperate or defect, - if turn number <= 5; cooperate""" + if turn number <= 5; cooperate""" opponent = axl.MockPlayer(actions=[C] * 5) actions = [(C, C)] * 5 @@ -281,8 +304,8 @@ def test_strategy5(self): ) def test_strategy6(self): - """ Given score per turn is greater than 2.25, - Tranquilizer will never defect twice in a row""" + """Given score per turn is greater than 2.25, + Tranquilizer will never defect twice in a row""" opponent = axl.MockPlayer(actions=[C] * 6) actions = [(C, C)] * 4 + [(D, C), (C, C)] @@ -356,7 +379,9 @@ def test_strategy10(self): "one_turn_after_good_defection_ratio_count": 1, "two_turns_after_good_defection_ratio_count": 1, } - self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs) + self.versus_test( + opponent, expected_actions=actions, attrs=expected_attrs + ) class TestGrofman(TestPlayer): @@ -603,7 +628,9 @@ def test_strategy(self): # Tries to cooperate every third time until detecting defective actions = ( - [(C, D), (D, D), (D, D), (D, D)] * 6 + [(C, D), (D, D)] + [(D, D)] * 100 + [(C, D), (D, D), (D, D), (D, D)] * 6 + + [(C, D), (D, D)] + + [(D, D)] * 100 
) self.versus_test(axl.Defector(), expected_actions=actions) @@ -656,7 +683,9 @@ def test_stochastic_behavior(self): """Test random responses on turns 1 through 17.""" # Use an explicit match because the random behavior in turns 1 through 17 # makes finding seeds reproducibly difficult. - match = axl.Match((axl.SecondByCave(), axl.Alternator()), turns=30, seed=1) + match = axl.Match( + (axl.SecondByCave(), axl.Alternator()), turns=30, seed=1 + ) match.play() player_history = [round[0] for round in match.result] self.assertTrue(C in player_history[1:17]) @@ -667,7 +696,9 @@ def test_serial_defection_against_defector(self): to respond D->D.""" # Use an explicit match because the random behavior in turns 1 through 17 # makes finding seeds reproducibly difficult. - match = axl.Match((axl.SecondByCave(), axl.Defector()), turns=30, seed=1) + match = axl.Match( + (axl.SecondByCave(), axl.Defector()), turns=30, seed=1 + ) result = match.play() self.assertEqual(result[0], (C, D)) self.assertEqual(result[18:], [(D, D)] * 12) @@ -679,7 +710,9 @@ def test_serial_defection_against_mostly_defector(self): # makes finding seeds reproducibly difficult. opponent_actions = [D] * 17 + [C, C, C, C] almost_defector = axl.MockPlayer(actions=opponent_actions) - match = axl.Match((axl.SecondByCave(), almost_defector), turns=21, seed=1) + match = axl.Match( + (axl.SecondByCave(), almost_defector), turns=21, seed=1 + ) result = match.play() self.assertEqual(result[0], (C, D)) self.assertEqual(result[-3], (C, C)) @@ -690,10 +723,12 @@ def test_versus_alternator(self): random and defect.""" # Use an explicit match because the random behavior in turns 1 through 17 # makes finding seeds reproducibly difficult. - match = axl.Match((axl.SecondByCave(), axl.Alternator()), turns=100, seed=1) + match = axl.Match( + (axl.SecondByCave(), axl.Alternator()), turns=100, seed=1 + ) result = match.play() self.assertEqual(result[0], (C, C)) - self.assertEqual(result[37: 40], [(C, D), (D, C), (D, D)]) + self.assertEqual(result[37:40], [(C, D), (D, C), (D, D)]) self.assertEqual(result[40:], [(D, C), (D, D)] * 30) @@ -789,7 +824,7 @@ class TestGraaskampKatzen(TestPlayer): expected_classifier = { "memory_depth": float("inf"), "stochastic": False, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -923,7 +958,10 @@ def test_exit_fair_weather(self): # Immediately exit Fair-weather actions += [(D, C), (C, C), (D, C), (C, C)] self.versus_test( - Defect37_big, expected_actions=actions, seed=10, attrs={"mode": "Normal"} + Defect37_big, + expected_actions=actions, + seed=10, + attrs={"mode": "Normal"}, ) def test_exit_fair_weather2(self): @@ -934,7 +972,10 @@ def test_exit_fair_weather2(self): opponent_actions = [C] * 36 + [D] + [C] * 100 + [D] + [C] * 4 Defect37_big = axl.MockPlayer(actions=opponent_actions) self.versus_test( - Defect37_big, expected_actions=actions, seed=1, attrs={"mode": "Normal"} + Defect37_big, + expected_actions=actions, + seed=1, + attrs={"mode": "Normal"}, ) def test_non_fair_weather(self): @@ -1015,8 +1056,10 @@ def test_parity_limit_shortening(self): # Now hit the limit sooner actions += [(C, D), (D, C), (C, D), (C, C)] * 5 self.versus_test( - AsyncAlternator, expected_actions=actions, attrs={"parity_limit": 3}, - seed=10 + AsyncAlternator, + expected_actions=actions, + attrs={"parity_limit": 3}, + seed=10, ) def test_detect_streak(self): @@ -1036,14 +1079,15 @@ def test_detect_streak(self): axl.Defector(), 
expected_actions=actions, attrs={"recorded_defects": 119}, - seed=10 + seed=10, ) def test_detect_random(self): """Tests that detect_random() is triggered on a Random opponent and that the strategy defects thereafter.""" - match = axl.Match((axl.SecondByHarrington(), axl.Random()), seed=10, - turns=31) + match = axl.Match( + (axl.SecondByHarrington(), axl.Random()), seed=10, turns=31 + ) match.play() player = match.players[0] # Check that detect_random(30) is True. @@ -1149,14 +1193,18 @@ def test_strategy(self): actions = [(C, D)] + [(D, D)] * 8 self.versus_test( - axl.Defector(), expected_actions=actions, attrs={"score_to_beat_inc": 5} + axl.Defector(), + expected_actions=actions, + attrs={"score_to_beat_inc": 5}, ) actions = [(C, D)] + [(D, D)] * 8 # On tenth turn, try a fresh start actions += [(C, D), (C, D)] + [(D, D)] * 2 self.versus_test( - axl.Defector(), expected_actions=actions, attrs={"last_fresh_start": 11} + axl.Defector(), + expected_actions=actions, + attrs={"last_fresh_start": 11}, ) actions = [(C, C), (C, D)] @@ -1333,9 +1381,28 @@ def test_defects_on_turn_10_against_defector(self): self.versus_test(axl.Defector(), expected_actions=actions) def test_defection_logic_triggered(self): - actions = [(C, D), (C, D), (C, C), (C, D), (C, D), (C, C), (C, D), (C, D), - (C, C), (C, D), (D, D), (D, C), (C, D), (D, D), (D, C), (C, D), - (D, D), (D, C), (C, D), (D, D)] + actions = [ + (C, D), + (C, D), + (C, C), + (C, D), + (C, D), + (C, C), + (C, D), + (C, D), + (C, C), + (C, D), + (D, D), + (D, C), + (C, D), + (D, D), + (D, C), + (C, D), + (D, D), + (D, C), + (C, D), + (D, D), + ] self.versus_test(axl.CyclerDDC(), expected_actions=actions) def test_defection_logic_not_triggered(self): @@ -1409,7 +1476,9 @@ class TestRichardHufford(TestPlayer): def test_strategy(self): actions = [(C, C)] * 19 + [(D, C), (C, C), (C, C)] self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs={"streak_needed": 14} + axl.Cooperator(), + expected_actions=actions, + attrs={"streak_needed": 14}, ) actions = [(C, C)] * 19 + [(D, C), (C, C)] @@ -1418,14 +1487,18 @@ def test_strategy(self): ] # This is the first Cooperation that gets counted on the new streak actions += [(C, C)] * 13 + [(D, C), (C, C), (C, C)] self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs={"streak_needed": 11} + axl.Cooperator(), + expected_actions=actions, + attrs={"streak_needed": 11}, ) opponent_actions = [C] * 20 + [D] BoredCooperator = axl.MockPlayer(actions=opponent_actions) actions = [(C, C)] * 19 + [(D, C), (C, D), (C, C)] self.versus_test( - BoredCooperator, expected_actions=actions, attrs={"streak_needed": 31} + BoredCooperator, + expected_actions=actions, + attrs={"streak_needed": 31}, ) actions = [(C, D)] # "Disagreement" @@ -1434,7 +1507,9 @@ def test_strategy(self): actions += [(C, D)] # TFT. Disagreement actions += [(D, C)] # Three of last four are disagreements. actions += [(C, C)] # TFT. Disagreement - actions += [(D, D)] # Three of last four are disagreements. Disagreement + actions += [ + (D, D) + ] # Three of last four are disagreements. Disagreement actions += [(D, D)] # Three of last four are disagreements. actions += [(D, D)] # Now there are 5/9 disagreements, so Defect. self.versus_test( @@ -1470,23 +1545,39 @@ def test_strategy(self): # It's actually impossible to Defect on the third move. actions += [(D, D)] # (D, C, *) gets updated, then checked. actions += [(C, D)] # (D, C, *) gets updated, but (D, D, *) checked. 
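# One plausible reading of the "Increment (a, b, c)" / "Check (a, b, *)"
# bookkeeping in these comments, sketched with hypothetical names (not the
# strategy's actual code): tally each three-move context and compare how
# often it was followed by C versus how often by D.
from collections import defaultdict

counts = defaultdict(int)  # keyed by (move_a, move_b, follow_up) triples

def net(a, b):
    # Positive: context (a, b, *) has been followed by C more often than D.
    return counts[(a, b, C)] - counts[(a, b, D)]

# "Increment (C, C, D)" then reads counts[(C, C, D)] += 1, and "Check
# (D, C, *)" reads: cooperate when net(D, C) >= 0, otherwise defect --
# consistent with the checks annotated above and below, e.g.
# "Check (C, C, *) = -1. Defect."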
- actions += [(D, D)] * 30 # (D, D, *) gets updated and checked from here on. + actions += [ + (D, D) + ] * 30 # (D, D, *) gets updated and checked from here on. self.versus_test(axl.Defector(), expected_actions=actions) actions = [(C, C), (C, D)] - actions += [(C, C)] # Increment (C, C, C). Check (C, C, *). Cooperate. + actions += [ + (C, C) + ] # Increment (C, C, C). Check (C, C, *). Cooperate. # Reminder that first C is default value and last C is opponent's first move. - actions += [(C, D)] # Increment (C, C, D). Check (D, C, *) = 0. Cooperate. - actions += [(C, C)] # Increment (D, C, C). Check (C, C, *) = 0. Cooperate. + actions += [ + (C, D) + ] # Increment (C, C, D). Check (D, C, *) = 0. Cooperate. + actions += [ + (C, C) + ] # Increment (D, C, C). Check (C, C, *) = 0. Cooperate. # There is one Defection and one Cooperation in this scenario, # but the Cooperation was due to a default value only. We can see where this is going. - actions += [(C, D)] # Increment (C, C, D). Check (D, C, *) = 1. Cooperate. - actions += [(D, C)] # Increment (D, C, C). Check (C, C, *) = -1. Defect. + actions += [ + (C, D) + ] # Increment (C, C, D). Check (D, C, *) = 1. Cooperate. + actions += [ + (D, C) + ] # Increment (D, C, C). Check (C, C, *) = -1. Defect. actions += [ (C, D) ] # Increment (C, C, D). Check (D, D, *) = 0 (New). Cooperate. - actions += [(D, C)] # Increment (D, D, C). Check (C, C, *) < 0. Defect. - actions += [(C, D)] # Increment (C, C, D). Check (D, D, *) > 0. Cooperate. + actions += [ + (D, C) + ] # Increment (D, D, C). Check (C, C, *) < 0. Defect. + actions += [ + (C, D) + ] # Increment (C, C, D). Check (D, D, *) > 0. Cooperate. actions += [(D, C), (C, D)] * 15 # This pattern continues for a while. actions += [ (D, C), @@ -1670,7 +1761,9 @@ def test_strategy(self): opponent_actions = [C] * 100 + [D] * 10 Change_of_Heart = axl.MockPlayer(actions=opponent_actions) actions = [(C, C)] * 100 + [(C, D)] * 4 - self.versus_test(Change_of_Heart, expected_actions=actions, attrs={"credit": 2}) + self.versus_test( + Change_of_Heart, expected_actions=actions, attrs={"credit": 2} + ) Change_of_Heart = axl.MockPlayer(actions=opponent_actions) actions += [(C, D)] * 2 self.versus_test( @@ -1678,6 +1771,7 @@ def test_strategy(self): ) # Still Cooperate, because Defect rate is low + class TestRowsam(TestPlayer): name = "Second by Rowsam" player = axl.SecondByRowsam @@ -1699,12 +1793,18 @@ def test_strategy(self): # Against a Defector should eventually enter Defect mode actions = [(C, D)] * 5 actions += [(D, D), (C, D), (D, D)] # Do a Coop-Def cycle - self.versus_test(axl.Defector(), expected_actions=actions, attrs={ - "distrust_points": 5}) + self.versus_test( + axl.Defector(), + expected_actions=actions, + attrs={"distrust_points": 5}, + ) actions += [(C, D)] * 3 # Continue for now actions += [(D, D)] * 100 # Now Defect mode - self.versus_test(axl.Defector(), expected_actions=actions, attrs={ - "distrust_points": 10, "mode": "Defect"}) + self.versus_test( + axl.Defector(), + expected_actions=actions, + attrs={"distrust_points": 10, "mode": "Defect"}, + ) # Test specific score scenarios # 5 Defects @@ -1712,8 +1812,11 @@ def test_strategy(self): custom_opponent = axl.MockPlayer(actions=opponent_actions) actions = [(C, D)] * 5 actions += [(D, C)] - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 5, "current_score": 0}) + self.versus_test( + custom_opponent, + expected_actions=actions, + attrs={"distrust_points": 5, "current_score": 0}, + ) # 3 Defects 
opponent_actions = [D] * 3 + [C] * 100 @@ -1721,8 +1824,11 @@ def test_strategy(self): actions = [(C, D)] * 3 actions += [(C, C)] * 2 actions += [(D, C)] - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 3, "current_score": 6}) + self.versus_test( + custom_opponent, + expected_actions=actions, + attrs={"distrust_points": 3, "current_score": 6}, + ) # 2 Defects opponent_actions = [D] * 2 + [C] * 100 @@ -1730,8 +1836,11 @@ def test_strategy(self): actions = [(C, D)] * 2 actions += [(C, C)] * 3 actions += [(D, C)] - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 2, "current_score": 9}) + self.versus_test( + custom_opponent, + expected_actions=actions, + attrs={"distrust_points": 2, "current_score": 9}, + ) # 1 Defect opponent_actions = [D] * 1 + [C] * 100 @@ -1739,8 +1848,11 @@ def test_strategy(self): actions = [(C, D)] * 1 actions += [(C, C)] * 4 actions += [(D, C)] - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 1, "current_score": 12}) + self.versus_test( + custom_opponent, + expected_actions=actions, + attrs={"distrust_points": 1, "current_score": 12}, + ) # Test that some distrust_points wear off. opponent_actions = [D] * 3 + [C] * 100 @@ -1748,27 +1860,42 @@ def test_strategy(self): actions = [(C, D)] * 3 actions += [(C, C)] * 2 actions += [(D, C)] - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 3, "current_score": 6}) + self.versus_test( + custom_opponent, + expected_actions=actions, + attrs={"distrust_points": 3, "current_score": 6}, + ) custom_opponent = axl.MockPlayer(actions=opponent_actions) actions += [(C, C), (D, C)] # Complete Coop-Def cycle actions += [(C, C)] * 3 actions += [(D, C)] - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 4, "current_score": 28}) + self.versus_test( + custom_opponent, + expected_actions=actions, + attrs={"distrust_points": 4, "current_score": 28}, + ) custom_opponent = axl.MockPlayer(actions=opponent_actions) actions += [(C, C), (D, C)] # Complete Coop-Def cycle actions += [(C, C)] * 4 # No defect or cycle this time. - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 3, "current_score": 50}) # One point wears off. + self.versus_test( + custom_opponent, + expected_actions=actions, + attrs={"distrust_points": 3, "current_score": 50}, + ) # One point wears off. 
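The current_score values asserted in these Rowsam scenarios follow directly from the game matrix; a quick arithmetic check, assuming the default Axelrod payoffs (R, P, S, T) = (3, 1, 0, 5):

import axelrod as axl

C, D = axl.Action.C, axl.Action.D
game = axl.Game()  # default payoffs: R=3, P=1, S=0, T=5

def player_score(rounds):
    # Sum the focal player's payoff over a list of (player, opponent) rounds.
    return sum(game.score(pair)[0] for pair in rounds)

assert player_score([(C, D)] * 3 + [(C, C)] * 2) == 6   # the "3 Defects" case
assert player_score([(C, D)] * 2 + [(C, C)] * 3) == 9   # the "2 Defects" case
assert player_score([(C, D)] * 1 + [(C, C)] * 4) == 12  # the "1 Defect" case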
custom_opponent = axl.MockPlayer(actions=opponent_actions) actions += [(C, C)] * 18 - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 2}) # Second point wears off + self.versus_test( + custom_opponent, + expected_actions=actions, + attrs={"distrust_points": 2}, + ) # Second point wears off custom_opponent = axl.MockPlayer(actions=opponent_actions) actions += [(C, C)] * 18 - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 2}) # But no more + self.versus_test( + custom_opponent, + expected_actions=actions, + attrs={"distrust_points": 2}, + ) # But no more class TestAppold(TestPlayer): @@ -1787,64 +1914,93 @@ class TestAppold(TestPlayer): def test_cooperate_against_cooperating_opponent(self): """Strategy should cooperate 100% of the time with a fully cooperating opponent.""" actions = [(C, C)] * 100 - self.versus_test(axl.Cooperator(), expected_actions=actions, - attrs={ - "first_opp_def": False, - "total_num_of_x": {C: 99, D: 1}, - "opp_c_after_x": {C: 99, D: 1} - }) + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + attrs={ + "first_opp_def": False, + "total_num_of_x": {C: 99, D: 1}, + "opp_c_after_x": {C: 99, D: 1}, + }, + ) def test_cooperate_on_first_four_turns(self): """Strategy will cooperate on the first four turns regardless of opponent.""" # Hypothesis opportunity: choose random opponent player_expected_actions = [C, C, C, C] coplayer_expected_actions = [D, D, D, D] - expected_actions = list(zip(player_expected_actions, coplayer_expected_actions)) - self.versus_test(axl.Defector(), turns=4, expected_actions=expected_actions, - attrs={ - "first_opp_def": False, - "total_num_of_x": {C: 3, D: 1}, - "opp_c_after_x": {C: 0, D: 1} - }) + expected_actions = list( + zip(player_expected_actions, coplayer_expected_actions) + ) + self.versus_test( + axl.Defector(), + turns=4, + expected_actions=expected_actions, + attrs={ + "first_opp_def": False, + "total_num_of_x": {C: 3, D: 1}, + "opp_c_after_x": {C: 0, D: 1}, + }, + ) def test_fifth_move_cooperate(self): """Strategy will cooperate on a fifth move defection and set first_opp_def.""" player_expected_actions = [C, C, C, C, C, C] coplayer_expected_actions = [C, C, C, C, D, C] coplayer = axl.MockPlayer(actions=coplayer_expected_actions) - expected_actions = list(zip(player_expected_actions, coplayer_expected_actions)) - self.versus_test(coplayer, turns=6, expected_actions=expected_actions, - attrs={ - "first_opp_def": True, - "total_num_of_x": {C: 5, D: 1}, - "opp_c_after_x": {C: 4, D: 1} - }) + expected_actions = list( + zip(player_expected_actions, coplayer_expected_actions) + ) + self.versus_test( + coplayer, + turns=6, + expected_actions=expected_actions, + attrs={ + "first_opp_def": True, + "total_num_of_x": {C: 5, D: 1}, + "opp_c_after_x": {C: 4, D: 1}, + }, + ) def test_sixth_move_cooperate(self): """Strategy will cooperate on a sixth move defection if it is the first.""" player_expected_actions = [C, C, C, C, C, C, C] coplayer_expected_actions = [C, C, C, C, C, D, C] coplayer = axl.MockPlayer(actions=coplayer_expected_actions) - expected_actions = list(zip(player_expected_actions, coplayer_expected_actions)) - self.versus_test(coplayer, turns=7, expected_actions=expected_actions, seed=1, - attrs={ - "first_opp_def": True, - "total_num_of_x": {C: 6, D: 1}, - "opp_c_after_x": {C: 5, D: 1} - }) + expected_actions = list( + zip(player_expected_actions, coplayer_expected_actions) + ) + self.versus_test( + coplayer, + turns=7, + 
expected_actions=expected_actions, + seed=1, + attrs={ + "first_opp_def": True, + "total_num_of_x": {C: 6, D: 1}, + "opp_c_after_x": {C: 5, D: 1}, + }, + ) def test_sixth_move_defect(self): """Strategy will defect on a sixth move defection if it is not the first.""" player_expected_actions = [C, C, C, C, C, C, D] coplayer_expected_actions = [C, C, C, C, D, D, C] coplayer = axl.MockPlayer(actions=coplayer_expected_actions) - expected_actions = list(zip(player_expected_actions, coplayer_expected_actions)) - self.versus_test(coplayer, turns=7, expected_actions=expected_actions, seed=10, - attrs={ - "first_opp_def": True, - "total_num_of_x": {C: 6, D: 1}, - "opp_c_after_x": {C: 4, D: 1} - }) + expected_actions = list( + zip(player_expected_actions, coplayer_expected_actions) + ) + self.versus_test( + coplayer, + turns=7, + expected_actions=expected_actions, + seed=10, + attrs={ + "first_opp_def": True, + "total_num_of_x": {C: 6, D: 1}, + "opp_c_after_x": {C: 4, D: 1}, + }, + ) def test_later_single_defection_forgiveness(self): # An opponent who defects after a long time, then tries cooperating @@ -1856,8 +2012,12 @@ def test_later_single_defection_forgiveness(self): actions += [(C, D)] # But we forgive it (and record it). actions += [(C, C)] * 10 - self.versus_test(MostlyCooperates, expected_actions=actions, seed=1, - attrs={"first_opp_def": True}) + self.versus_test( + MostlyCooperates, + expected_actions=actions, + seed=1, + attrs={"first_opp_def": True}, + ) def test_stochastic_behavior(self): opponent = axl.Defector() @@ -1871,19 +2031,24 @@ def test_stochastic_behavior(self): # Then defect most of the time, depending on the random number. We # don't defect 100% of the time, because of the way that # opp_c_after_x is initialized. - actions += [(D, D), - (C, D), - (D, D), - (D, D), # C can never be two moves after a C.
+ (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (C, D), + (C, D), + (D, D), ] - self.versus_test(opponent, expected_actions=actions, seed=1018, - attrs={"first_opp_def": True}) + self.versus_test( + opponent, + expected_actions=actions, + seed=1018, + attrs={"first_opp_def": True}, + ) diff --git a/axelrod/tests/strategies/test_backstabber.py b/axelrod/tests/strategies/test_backstabber.py index 1f580c9cf..c4a988aec 100644 --- a/axelrod/tests/strategies/test_backstabber.py +++ b/axelrod/tests/strategies/test_backstabber.py @@ -117,9 +117,13 @@ def test_when_alt_strategy_is_triggered(self): def test_starting_defect_keeps_alt_strategy_from_triggering(self): opponent_actions_suffix = [C, D, C, D, D] + 3 * [C] - expected_actions_suffix = [(C, C), (C, D), (C, C), (C, D), (C, D)] + 3 * [ - (D, C) - ] + expected_actions_suffix = [ + (C, C), + (C, D), + (C, C), + (C, D), + (C, D), + ] + 3 * [(D, C)] defects_on_first = [D] + [C] * 6 defects_on_first_actions = [(C, D)] + [(C, C)] * 6 diff --git a/axelrod/tests/strategies/test_bush_mosteller.py b/axelrod/tests/strategies/test_bush_mosteller.py index 2cc881a57..b4753fec7 100644 --- a/axelrod/tests/strategies/test_bush_mosteller.py +++ b/axelrod/tests/strategies/test_bush_mosteller.py @@ -12,7 +12,7 @@ class TestBushMostellar(TestPlayer): expected_classifier = { "memory_depth": float("inf"), "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, diff --git a/axelrod/tests/strategies/test_calculator.py b/axelrod/tests/strategies/test_calculator.py index 0d5eedfb3..e99114600 100644 --- a/axelrod/tests/strategies/test_calculator.py +++ b/axelrod/tests/strategies/test_calculator.py @@ -25,7 +25,9 @@ class TestCalculator(TestPlayer): def test_twenty_rounds_joss_for_cyclers(self): """Uses axelrod.strategies.axelrod_first.FirstByJoss strategy for first 20 rounds""" seed = 4 - match = axl.Match((axl.FirstByJoss(), axl.Alternator()), turns=20, seed=seed) + match = axl.Match( + (axl.FirstByJoss(), axl.Alternator()), turns=20, seed=seed + ) match.play() self.versus_test( axl.Alternator(), expected_actions=match.result, seed=seed @@ -34,17 +36,24 @@ def test_twenty_rounds_joss_for_cyclers(self): def test_twenty_rounds_joss_then_defects_for_cyclers(self): """Uses axelrod.strategies.axelrod_first.FirstByJoss strategy for first 20 rounds""" seed = 4 - match = axl.Match((axl.FirstByJoss(), axl.Alternator()), turns=20, seed=seed) + match = axl.Match( + (axl.FirstByJoss(), axl.Alternator()), turns=20, seed=seed + ) match.play() expected_actions = match.result + [(D, C), (D, D), (D, C), (D, D)] self.versus_test( - axl.Alternator(), expected_actions=expected_actions, seed=seed, turns=24, + axl.Alternator(), + expected_actions=expected_actions, + seed=seed, + turns=24, ) def test_twenty_rounds_joss_for_noncyclers(self): """Uses axelrod.strategies.axelrod_first.FirstByJoss strategy for first 20 rounds""" seed = 4 - match = axl.Match((axl.FirstByJoss(), axl.AntiCycler()), turns=20, seed=seed) + match = axl.Match( + (axl.FirstByJoss(), axl.AntiCycler()), turns=20, seed=seed + ) match.play() self.versus_test( axl.AntiCycler(), expected_actions=match.result, seed=seed @@ -53,16 +62,29 @@ def test_twenty_rounds_joss_for_noncyclers(self): def test_twenty_rounds_joss_then_tft_for_noncyclers(self): """Uses axelrod.strategies.axelrod_first.FirstByJoss strategy for first 20 rounds""" seed = 4 - match = axl.Match((axl.FirstByJoss(), axl.AntiCycler()), turns=20, 
seed=seed) + match = axl.Match( + (axl.FirstByJoss(), axl.AntiCycler()), turns=20, seed=seed + ) match.play() - expected_actions = match.result + [(C, C), (C, C), (C, D), (D, C), (C, C)] + expected_actions = match.result + [ + (C, C), + (C, C), + (C, D), + (D, C), + (C, C), + ] self.versus_test( - axl.AntiCycler(), expected_actions=expected_actions, seed=seed, turns=24, + axl.AntiCycler(), + expected_actions=expected_actions, + seed=seed, + turns=24, ) def test_edge_case_calculator_sees_cycles_of_size_ten(self): ten_length_cycle = [C, D, C, C, D, C, C, C, D, C] - self.assertEqual(detect_cycle((ten_length_cycle * 2)), tuple(ten_length_cycle)) + self.assertEqual( + detect_cycle((ten_length_cycle * 2)), tuple(ten_length_cycle) + ) ten_cycle_twenty_rounds = get_joss_strategy_actions( ten_length_cycle * 2, indices_to_flip=[16] ) @@ -85,7 +107,9 @@ def test_edge_case_calculator_ignores_cycles_gt_len_ten(self): ) opponent_actions = twenty_rounds_of_eleven_len_cycle[:-1] + [D] + [C, D] - self.assertEqual(detect_cycle(opponent_actions), tuple(eleven_length_cycle)) + self.assertEqual( + detect_cycle(opponent_actions), tuple(eleven_length_cycle) + ) uses_tit_for_tat_after_twenty_rounds = twenty_rounds + [(D, C), (C, D)] self.versus_test( @@ -105,13 +129,19 @@ def test_get_joss_strategy_actions(self): self.assertEqual(get_joss_strategy_actions(opponent, []), without_flip) self.assertEqual( - get_joss_strategy_actions(opponent, flip_never_occurs_at_index_zero), + get_joss_strategy_actions( + opponent, flip_never_occurs_at_index_zero + ), without_flip, ) - self.assertEqual(get_joss_strategy_actions(opponent, flip_indices), with_flip) + self.assertEqual( + get_joss_strategy_actions(opponent, flip_indices), with_flip + ) -def get_joss_strategy_actions(opponent_moves: list, indices_to_flip: list) -> list: +def get_joss_strategy_actions( + opponent_moves: list, indices_to_flip: list +) -> list: """ Takes a list of opponent moves and returns a tuple list of [(Joss moves, opponent moves)]. "indices_to_flip" are the indices where Joss differs from its expected TitForTat. diff --git a/axelrod/tests/strategies/test_cooperator.py b/axelrod/tests/strategies/test_cooperator.py index aca2290ae..7dd4e7d3b 100644 --- a/axelrod/tests/strategies/test_cooperator.py +++ b/axelrod/tests/strategies/test_cooperator.py @@ -41,7 +41,9 @@ class TestTrickyCooperator(TestPlayer): def test_strategy(self): # Test if it tries to trick opponent.
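The Calculator edge cases above turn on detect_cycle, which returns the shortest repeating cycle consistent with a history, or None when there is none. A brief illustration, assuming the helper is importable from axelrod._strategy_utils as in the test module:

import axelrod as axl
from axelrod._strategy_utils import detect_cycle  # assumed import path

C, D = axl.Action.C, axl.Action.D
# A repeated five-element cycle is recovered as a tuple of Actions...
assert detect_cycle([C, D, C, C, D] * 4) == (C, D, C, C, D)
# ...while a history with no repeating structure yields None.
assert detect_cycle([C, C, C, D]) is None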
- self.versus_test(axl.Cooperator(), [(C, C), (C, C), (C, C), (D, C), (D, C)]) + self.versus_test( + axl.Cooperator(), [(C, C), (C, C), (C, C), (D, C), (D, C)] + ) opponent_actions = [C, C, C, C, D, D] expected_actions = [(C, C), (C, C), (C, C), (D, C), (D, D), (C, D)] @@ -68,7 +70,7 @@ def test_cooperates_in_first_three_rounds(self): self.versus_test(axl.Alternator(), expected_actions=against_alternator) def test_defects_after_three_rounds_if_opponent_only_cooperated_in_max_history_depth_ten( - self + self, ): against_cooperator = [(C, C)] * 3 + [(D, C)] * 20 self.versus_test(axl.Cooperator(), expected_actions=against_cooperator) @@ -76,4 +78,6 @@ def test_defects_after_three_rounds_if_opponent_only_cooperated_in_max_history_d def test_defects_when_opponent_has_no_defections_to_history_depth_ten(self): opponent_actions = [D] + [C] * 10 + [D, C] expected_actions = [(C, D)] + [(C, C)] * 10 + [(D, D), (C, C)] - self.versus_test(axl.MockPlayer(actions=opponent_actions), expected_actions) + self.versus_test( + axl.MockPlayer(actions=opponent_actions), expected_actions + ) diff --git a/axelrod/tests/strategies/test_cycler.py b/axelrod/tests/strategies/test_cycler.py index 675d4b4d5..7e3e7e230 100644 --- a/axelrod/tests/strategies/test_cycler.py +++ b/axelrod/tests/strategies/test_cycler.py @@ -1,6 +1,5 @@ """Tests for the Cycler strategies.""" import itertools -import random import unittest import axelrod as axl @@ -98,7 +97,9 @@ def test_memory_depth_is_len_cycle_minus_one(self): def test_cycler_works_as_expected(self): expected = [(C, D), (D, D), (D, D), (C, D)] * 2 self.versus_test( - axl.Defector(), expected_actions=expected, init_kwargs={"cycle": "CDDC"} + axl.Defector(), + expected_actions=expected, + init_kwargs={"cycle": "CDDC"}, ) def test_cycle_raises_value_error_on_bad_cycle_str(self): @@ -172,11 +173,17 @@ def test_normalized_parameters(self): ) cycle = "C" * random.randint(0, 20) + "D" * random.randint(0, 20) - self.assertEqual(self.player_class(cycle=cycle, seed=1)._normalize_parameters(cycle=cycle), - (cycle, len(cycle))) + self.assertEqual( + self.player_class(cycle=cycle, seed=1)._normalize_parameters( + cycle=cycle + ), + (cycle, len(cycle)), + ) cycle_length = random.randint(1, 20) - random_cycle, cycle_length2 = self.player_class(cycle=cycle, seed=1)._normalize_parameters(cycle_length=cycle_length) + random_cycle, cycle_length2 = self.player_class( + cycle=cycle, seed=1 + )._normalize_parameters(cycle_length=cycle_length) self.assertEqual(len(random_cycle), cycle_length) self.assertEqual(cycle_length, cycle_length2) diff --git a/axelrod/tests/strategies/test_darwin.py b/axelrod/tests/strategies/test_darwin.py index 50c0c4f2b..146489f8a 100644 --- a/axelrod/tests/strategies/test_darwin.py +++ b/axelrod/tests/strategies/test_darwin.py @@ -76,7 +76,9 @@ def test_against_geller_and_mindreader(self): def test_reset_history_and_attributes(self): # Overwrite this method because Darwin does not reset - self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) + self.versus_test( + axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4 + ) p1 = self.player() self.assertEqual(p1.genome, [D, C, C, C, D]) @@ -89,7 +91,9 @@ def test_all_darwin_instances_share_one_genome(self): p2 = self.player() self.assertIs(p1.genome, p2.genome) - self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) + self.versus_test( + axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4 + ) self.assertEqual(p2.genome, [D, C, C, C, D]) self.assertIs(p1.genome, p2.genome) 
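The Darwin assertions above depend on the genome living on the class rather than on instances, so every player mutates (and shares) one list. A short sketch of the behaviour under test:

import axelrod as axl

p1, p2 = axl.Darwin(), axl.Darwin()
assert p1.genome is p2.genome      # one genome object shared by all instances
axl.Darwin.reset_genome()          # restore the default single-gene genome
assert p1.genome == [axl.Action.C]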
@@ -97,7 +101,9 @@ def test_all_darwin_instances_share_one_genome(self): self.assertIs(p3.genome, p2.genome) def test_reset_genome(self): - self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) + self.versus_test( + axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4 + ) self.player.reset_genome() self.assertEqual(self.player().genome, [C]) diff --git a/axelrod/tests/strategies/test_dbs.py b/axelrod/tests/strategies/test_dbs.py index 93d61e41a..a3afb81a4 100644 --- a/axelrod/tests/strategies/test_dbs.py +++ b/axelrod/tests/strategies/test_dbs.py @@ -67,7 +67,9 @@ def test_move_gen_cooperator(self): """ expected_output = [D, D, D, D] for inp, out in zip(self.input_pos, expected_output): - out_move = dbs.move_gen(inp, self.cooperator_policy, depth_search_tree=5) + out_move = dbs.move_gen( + inp, self.cooperator_policy, depth_search_tree=5 + ) self.assertEqual(out_move, out) def test_minimaxTreeSearch_defector(self): @@ -89,7 +91,9 @@ def test_move_gen_defector(self): """ expected_output = [D, D, D, D] for inp, out in zip(self.input_pos, expected_output): - out_move = dbs.move_gen(inp, self.defector_policy, depth_search_tree=5) + out_move = dbs.move_gen( + inp, self.defector_policy, depth_search_tree=5 + ) self.assertEqual(out_move, out) def test_minimaxTreeSearch_titForTat(self): @@ -125,7 +129,9 @@ def test_move_gen_titForTat(self): """ expected_output = [C, C, C, C] for inp, out in zip(self.input_pos, expected_output): - out_move = dbs.move_gen(inp, self.titForTat_policy, depth_search_tree=5) + out_move = dbs.move_gen( + inp, self.titForTat_policy, depth_search_tree=5 + ) self.assertEqual(out_move, out) def test_minimaxTreeSearch_alternator(self): @@ -147,7 +153,9 @@ def test_move_gen_alternator(self): """ expected_output = [D, D, D, D] for inp, out in zip(self.input_pos, expected_output): - out_move = dbs.move_gen(inp, self.random_policy, depth_search_tree=5) + out_move = dbs.move_gen( + inp, self.random_policy, depth_search_tree=5 + ) self.assertEqual(out_move, out) def test_minimaxTreeSearch_random(self): @@ -169,7 +177,9 @@ def test_move_gen_random(self): """ expected_output = [D, D, D, D] for inp, out in zip(self.input_pos, expected_output): - out_move = dbs.move_gen(inp, self.random_policy, depth_search_tree=5) + out_move = dbs.move_gen( + inp, self.random_policy, depth_search_tree=5 + ) self.assertEqual(out_move, out) def test_minimaxTreeSearch_grudger(self): @@ -192,7 +202,9 @@ def test_move_gen_grudger(self): """ expected_output = [C, D, D, D] for inp, out in zip(self.input_pos, expected_output): - out_move = dbs.move_gen(inp, self.grudger_policy, depth_search_tree=5) + out_move = dbs.move_gen( + inp, self.grudger_policy, depth_search_tree=5 + ) self.assertEqual(out_move, out) diff --git a/axelrod/tests/strategies/test_doubler.py b/axelrod/tests/strategies/test_doubler.py index e3d436302..a02f42695 100644 --- a/axelrod/tests/strategies/test_doubler.py +++ b/axelrod/tests/strategies/test_doubler.py @@ -22,7 +22,7 @@ class TestDoubler(TestPlayer): } def test_defects_if_opponent_last_play_is_D_and_defections_gt_two_times_cooperations( - self + self, ): opponent_plays = [C] * 7 + [D] * 4 + [C] actions = [(C, C)] * 7 + [(C, D)] * 4 + [(D, C)] @@ -31,7 +31,7 @@ def test_defects_if_opponent_last_play_is_D_and_defections_gt_two_times_cooperat ) def test_defects_if_opponent_last_play_D_and_defections_equal_two_times_cooperations( - self + self, ): opponent_plays = [C] * 8 + [D] * 4 + [C] actions = [(C, C)] * 8 + [(C, D)] * 4 + [(D, C)] diff --git 
a/axelrod/tests/strategies/test_evolvable_player.py b/axelrod/tests/strategies/test_evolvable_player.py index 6ed840b0b..ea0fb77fc 100644 --- a/axelrod/tests/strategies/test_evolvable_player.py +++ b/axelrod/tests/strategies/test_evolvable_player.py @@ -3,7 +3,11 @@ import axelrod as axl from axelrod.action import Action -from axelrod.evolvable_player import copy_lists, crossover_dictionaries, crossover_lists +from axelrod.evolvable_player import ( + copy_lists, + crossover_dictionaries, + crossover_lists, +) from .test_player import TestPlayer @@ -17,8 +21,7 @@ class PartialedClass(cls): seed = kwargs["seed"] except KeyError: kwargs["seed"] = 1 - __init__ = functools.partialmethod( - cls.__init__, **kwargs) + __init__ = functools.partialmethod(cls.__init__, **kwargs) return PartialedClass @@ -45,7 +48,9 @@ def mutate(self): def crossover(self, other): if other.__class__ != self.__class__: - raise TypeError("Crossover must be between the same player classes.") + raise TypeError( + "Crossover must be between the same player classes." + ) value = self.value + other.value return EvolvableTestOpponent(value) @@ -125,7 +130,11 @@ def test_crossover(self): players.append(player) player1, player2 = players crossed = player1.crossover(player2) - if player1 != crossed and player2 != crossed and crossed == crossed.clone(): + if ( + player1 != crossed + and player2 != crossed + and crossed == crossed.clone() + ): return # Should never get here unless a change breaks the test, so don't include in coverage. self.assertFalse(True) # pragma: no cover @@ -140,7 +149,9 @@ def test_serialization(self): """Serializing and deserializing should return the original player.""" player = self.player(seed=1) serialized = player.serialize_parameters() - deserialized_player = player.__class__.deserialize_parameters(serialized) + deserialized_player = player.__class__.deserialize_parameters( + serialized + ) self.assertEqual(player, deserialized_player) self.assertEqual(deserialized_player, deserialized_player.clone()) @@ -149,7 +160,7 @@ def test_serialization_csv(self): player = self.player(seed=1) serialized = player.serialize_parameters() s = "0, 1, {}, 3".format(serialized) - s2 = s.split(',')[2] + s2 = s.split(",")[2] deserialized_player = player.__class__.deserialize_parameters(s2) self.assertEqual(player, deserialized_player) self.assertEqual(deserialized_player, deserialized_player.clone()) @@ -178,7 +189,9 @@ def test_behavior(self): self.behavior_test(player, parent_player) serialized = player.serialize_parameters() - deserialized_player = player.__class__.deserialize_parameters(serialized) + deserialized_player = player.__class__.deserialize_parameters( + serialized + ) self.behavior_test(deserialized_player, parent_player) def test_seed_propagation(self): @@ -202,7 +215,6 @@ def test_seed_preservation(self): class TestUtilityFunctions(unittest.TestCase): - def test_copy_lists(self): l1 = [list(range(10)), list(range(20))] l2 = copy_lists(l1) @@ -221,12 +233,12 @@ def test_crossover_lists(self): self.assertEqual(crossed, list1[:1] + list2[1:]) def test_crossover_dictionaries(self): - dict1 = {'1': 1, '2': 2, '3': 3} - dict2 = {'1': 'a', '2': 'b', '3': 'c'} + dict1 = {"1": 1, "2": 2, "3": 3} + dict2 = {"1": "a", "2": "b", "3": "c"} rng = axl.RandomGenerator(seed=1) crossed = crossover_dictionaries(dict1, dict2, rng) - self.assertEqual(crossed, {'1': 1, '2': 'b', '3': 'c'}) + self.assertEqual(crossed, {"1": 1, "2": "b", "3": "c"}) rng = axl.RandomGenerator(seed=2) crossed = crossover_dictionaries(dict1, 
dict2, rng) diff --git a/axelrod/tests/strategies/test_finite_state_machines.py b/axelrod/tests/strategies/test_finite_state_machines.py index f3525b0e1..fe491679b 100644 --- a/axelrod/tests/strategies/test_finite_state_machines.py +++ b/axelrod/tests/strategies/test_finite_state_machines.py @@ -2,7 +2,9 @@ import unittest import axelrod as axl -from axelrod.compute_finite_state_machine_memory import get_memory_from_transitions +from axelrod.compute_finite_state_machine_memory import ( + get_memory_from_transitions, +) from axelrod.evolvable_player import InsufficientParametersError from axelrod.strategies.finite_state_machines import ( EvolvableFSMPlayer, @@ -46,8 +48,15 @@ def test__eq__false_by_state(self): self.assertFalse(new_two_state.__eq__(self.two_state)) def test__eq__false_by_transition(self): - different_transitions = ((1, C, 0, D), (1, D, 0, D), (0, C, 1, D), (0, D, 1, C)) - new_two_state = SimpleFSM(transitions=different_transitions, initial_state=1) + different_transitions = ( + (1, C, 0, D), + (1, D, 0, D), + (0, C, 1, D), + (0, D, 1, C), + ) + new_two_state = SimpleFSM( + transitions=different_transitions, initial_state=1 + ) self.assertFalse(new_two_state.__eq__(self.two_state)) @@ -91,7 +100,9 @@ def test_state_setter_raises_error_for_bad_input(self): with self.assertRaises(ValueError) as cm: self.two_state.state = 5 error_msg = cm.exception.args[0] - self.assertEqual(error_msg, "state: 5 does not have values for both C and D") + self.assertEqual( + error_msg, "state: 5 does not have values for both C and D" + ) class TestSampleFSMPlayer(TestPlayer): @@ -157,7 +168,12 @@ def test_wsls(self): """Tests that the player defined by the table for TFT is in fact WSLS (also known as Pavlov.""" wsls_init_kwargs = { - "transitions": ((1, C, 1, C), (1, D, 2, D), (2, C, 2, D), (2, D, 1, C)), + "transitions": ( + (1, C, 1, C), + (1, D, 2, D), + (2, C, 2, D), + (2, D, 1, C), + ), "initial_state": 1, "initial_action": C, } @@ -272,7 +288,9 @@ def test_strategy(self): opponent = axl.MockPlayer([D, D, C, C, D]) actions = [(C, D), (C, D), (C, C), (D, C), (C, D)] self.versus_test( - opponent, expected_actions=actions, init_kwargs={"transitions": transitions} + opponent, + expected_actions=actions, + init_kwargs={"transitions": transitions}, ) def test_memory(self): @@ -280,7 +298,10 @@ def test_memory(self): Test the memory depth using implemented algorithm """ transitions = self.player().fsm._state_transitions - self.assertEqual(get_memory_from_transitions(transitions), self.expected_classifier["memory_depth"]) + self.assertEqual( + get_memory_from_transitions(transitions), + self.expected_classifier["memory_depth"], + ) class TestFortress3(TestFSMPlayer): @@ -311,7 +332,15 @@ def test_strategy(self): state_and_actions = [(1, C), (1, D), (2, C), (1, C)] self.transitions_test(state_and_actions) - state_and_actions = [(1, D), (2, D), (3, C), (3, C), (3, C), (3, D), (1, C)] * 2 + state_and_actions = [ + (1, D), + (2, D), + (3, C), + (3, C), + (3, C), + (3, D), + (1, C), + ] * 2 self.transitions_test(state_and_actions) @unittest.expectedFailure @@ -415,7 +444,15 @@ def test_strategy(self): ] + [(7, D), (7, C), (8, C), (8, D), (6, D)] * 3 self.transitions_test(state_and_actions) - state_and_actions = [(0, D), (1, C), (2, D), (3, C), (5, D), (3, C), (5, C)] + [ + state_and_actions = [ + (0, D), + (1, C), + (2, D), + (3, C), + (5, D), + (3, C), + (5, C), + ] + [ (7, C), (8, D), (6, C), @@ -520,7 +557,9 @@ class TestRipoff(TestFSMPlayer): """ def test_strategy(self): - state_and_actions = [(1, 
C), (2, C)] * 3 + [(1, D)] + [(3, C), (3, D)] * 5 + state_and_actions = ( + [(1, C), (2, C)] * 3 + [(1, D)] + [(3, C), (3, D)] * 5 + ) self.transitions_test(state_and_actions) state_and_actions = [(1, C), (2, D)] + [(3, D)] * 5 @@ -621,7 +660,11 @@ class TestSolutionB1(TestFSMPlayer): def test_strategy(self): state_and_actions = ( - [(1, D)] * 3 + [(1, C)] + [(2, C)] * 3 + [(2, D)] + [(3, C), (3, D)] * 3 + [(1, D)] * 3 + + [(1, C)] + + [(2, C)] * 3 + + [(2, D)] + + [(3, C), (3, D)] * 3 ) self.transitions_test(state_and_actions) @@ -802,7 +845,9 @@ class TestEvolvedFSM16(TestFSMPlayer): def test_strategy(self): # finished: 0, - state_and_actions = [(0, C)] * 3 + [(0, D)] + [(12, D), (11, D), (5, C)] * 3 + state_and_actions = ( + [(0, C)] * 3 + [(0, D)] + [(12, D), (11, D), (5, C)] * 3 + ) self.transitions_test(state_and_actions) # finished: 0, 5, 10 @@ -849,7 +894,13 @@ def test_strategy(self): self.transitions_test(state_and_actions) # finished: 0, 1, 2, 3, 5, 10, 11, 12, 13, 14, 15 - state_and_actions = to_state_seven + [(7, D), (1, D), (6, C), (5, D), (10, C)] + state_and_actions = to_state_seven + [ + (7, D), + (1, D), + (6, C), + (5, D), + (10, C), + ] self.transitions_test(state_and_actions) # finished: 0, 1, 2, 3, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15 @@ -979,7 +1030,12 @@ def test_strategy(self): # finished 0, 1, 2, 3, 4, 5, 6, 8, 10, 12, 13, 15 to_state_eleven = [(0, D), (3, C), (10, D), (1, D), (15, D)] - state_and_actions = to_state_eleven + [(11, C), (14, C), (3, C), (10, D)] + state_and_actions = to_state_eleven + [ + (11, C), + (14, C), + (3, C), + (10, D), + ] self.transitions_test(state_and_actions) # finished 0, 1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 15 @@ -1058,7 +1114,12 @@ def test_normalized_parameters(self): self.assertRaises( InsufficientParametersError, self.player_class, - transitions=[[0, C, 1, D], [0, D, 0, D], [1, C, 1, C], [1, D, 1, D]], + transitions=[ + [0, C, 1, D], + [0, D, 0, D], + [1, C, 1, C], + [1, D, 1, D], + ], seed=1, # To prevent exception from unset seed. ) @@ -1083,7 +1144,9 @@ def test_vector_to_instance(self): self.assertIsInstance(player, axl.EvolvableFSMPlayer) serialized = player.serialize_parameters() - deserialized_player = player.__class__.deserialize_parameters(serialized) + deserialized_player = player.__class__.deserialize_parameters( + serialized + ) self.assertEqual(player, deserialized_player) self.assertEqual(deserialized_player, deserialized_player.clone()) @@ -1098,7 +1161,8 @@ def test_mutate(self): """Test to trigger random lines in mutate""" for seed in [18, 22]: player = axl.EvolvableFSMPlayer( - num_states=4, mutation_probability=0.5, seed=seed) + num_states=4, mutation_probability=0.5, seed=seed + ) player.mutate() @@ -1126,7 +1190,7 @@ class TestEvolvableFSMPlayer4(TestEvolvablePlayer): init_parameters = { "transitions": ((1, C, 1, C), (1, D, 2, D), (2, C, 2, D), (2, D, 1, C)), "initial_state": 1, - "initial_action": C + "initial_action": C, } @@ -1135,7 +1199,8 @@ class TestEvolvableFSMPlayer4(TestEvolvablePlayer): EvolvableFSMPlayer, transitions=((1, C, 1, C), (1, D, 1, D)), initial_state=1, - initial_action=C) + initial_action=C, +) class EvolvableFSMAsFSM(TestFSMPlayer): diff --git a/axelrod/tests/strategies/test_forgiver.py b/axelrod/tests/strategies/test_forgiver.py index e83b86d76..43754f5b0 100644 --- a/axelrod/tests/strategies/test_forgiver.py +++ b/axelrod/tests/strategies/test_forgiver.py @@ -25,15 +25,19 @@ def test_strategy(self): # If opponent has defected more than 10 percent of the time, defect. 
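Forgiver's threshold named in the comment above is a plain ratio test, so the boundary can be checked by hand (pure arithmetic, independent of the library):

# One defection in ten opponent moves sits exactly at the threshold, so
# Forgiver keeps cooperating; one in nine pushes the ratio over 10%.
defections, moves = 1, 10
assert defections / moves == 0.10        # not strictly greater: cooperate
assert defections / (moves - 1) > 0.10   # strictly greater: defect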
self.versus_test(axl.Cooperator(), expected_actions=[(C, C)] * 10) - self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 10) + self.versus_test( + axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 10 + ) def test_cooperates_if_opponent_defections_is_ten_pct_and_defects_if_opponent_defections_gt_ten_pct( - self + self, ): final_action_lowers_defections_to_ten_percent = [D] + [C] * 9 expected = [(C, D)] + [(D, C)] * 9 self.versus_test( - axl.MockPlayer(actions=final_action_lowers_defections_to_ten_percent), + axl.MockPlayer( + actions=final_action_lowers_defections_to_ten_percent + ), expected_actions=expected * 5, ) @@ -62,7 +66,9 @@ class TestForgivingTitForTat(TestPlayer): def test_strategy(self): self.versus_test(axl.Cooperator(), expected_actions=[(C, C)] * 5) - self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 5) + self.versus_test( + axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 5 + ) self.versus_test( axl.Alternator(), expected_actions=[(C, C)] + [(C, D), (D, C)] * 5 ) @@ -77,7 +83,11 @@ def test_never_defects_if_opponent_defections_le_ten_percent(self): def test_plays_tit_for_tat_while_defections_gt_ten_percent(self): before_tft = (18 * [C] + [D]) * 3 + [D, D, D] - only_cooperates = ([(C, C)] * 18 + [(C, D)]) * 3 + [(C, D), (C, D), (C, D)] + only_cooperates = ([(C, C)] * 18 + [(C, D)]) * 3 + [ + (C, D), + (C, D), + (C, D), + ] self.versus_test( axl.MockPlayer(actions=before_tft), expected_actions=only_cooperates ) diff --git a/axelrod/tests/strategies/test_gambler.py b/axelrod/tests/strategies/test_gambler.py old mode 100755 new mode 100644 index cc58083e3..8683f9294 --- a/axelrod/tests/strategies/test_gambler.py +++ b/axelrod/tests/strategies/test_gambler.py @@ -520,14 +520,17 @@ def test_vs_alternator2(self): class TestEvolvableGambler(unittest.TestCase): - def test_receive_vector(self): plays, op_plays, op_start_plays = 1, 1, 1 player = axl.EvolvableGambler( - parameters=(plays, op_plays, op_start_plays), seed=1) + parameters=(plays, op_plays, op_start_plays), seed=1 + ) - self.assertRaises(AttributeError, axl.EvolvableGambler.__getattribute__, - *[player, 'vector']) + self.assertRaises( + AttributeError, + axl.EvolvableGambler.__getattribute__, + *[player, "vector"] + ) vector = [random.random() for _ in range(8)] player.receive_vector(vector) @@ -536,19 +539,24 @@ def test_receive_vector(self): def test_vector_to_instance(self): plays, op_plays, op_start_plays = 1, 1, 1 player = axl.EvolvableGambler( - parameters=(plays, op_plays, op_start_plays), seed=1) + parameters=(plays, op_plays, op_start_plays), seed=1 + ) vector = [random.random() for _ in range(8)] player.receive_vector(vector) - keys = create_lookup_table_keys(player_depth=plays, op_depth=op_plays, - op_openings_depth=op_start_plays) + keys = create_lookup_table_keys( + player_depth=plays, + op_depth=op_plays, + op_openings_depth=op_start_plays, + ) action_dict = dict(zip(keys, vector)) self.assertEqual(player._lookup.dictionary, action_dict) def test_create_vector_bounds(self): plays, op_plays, op_start_plays = 1, 1, 1 player = axl.EvolvableGambler( - parameters=(plays, op_plays, op_start_plays), seed=1) + parameters=(plays, op_plays, op_start_plays), seed=1 + ) lb, ub = player.create_vector_bounds() self.assertIsInstance(lb, list) self.assertIsInstance(ub, list) @@ -566,8 +574,7 @@ class TestEvolvableGambler2(TestEvolvablePlayer): player_class = axl.EvolvableGambler parent_class = axl.Gambler parent_kwargs = ["lookup_dict"] - init_parameters = {"parameters": 
(1, 1, 1), - "initial_actions": (C,)} + init_parameters = {"parameters": (1, 1, 1), "initial_actions": (C,)} class TestEvolvableGambler3(TestEvolvablePlayer): @@ -575,8 +582,14 @@ class TestEvolvableGambler3(TestEvolvablePlayer): player_class = axl.EvolvableGambler parent_class = axl.Gambler parent_kwargs = ["lookup_dict"] - init_parameters = {"parameters": (3, 2, 1), - "initial_actions": (C, C, C,)} + init_parameters = { + "parameters": (3, 2, 1), + "initial_actions": ( + C, + C, + C, + ), + } class TestEvolvableGambler4(TestEvolvablePlayer): @@ -584,9 +597,14 @@ class TestEvolvableGambler4(TestEvolvablePlayer): player_class = axl.EvolvableGambler parent_class = axl.Gambler parent_kwargs = ["lookup_dict"] - init_parameters = {"parameters": (2, 2, 2), - "pattern": [random.random() for _ in range(64)], - "initial_actions": (C, C,)} + init_parameters = { + "parameters": (2, 2, 2), + "pattern": [random.random() for _ in range(64)], + "initial_actions": ( + C, + C, + ), + } # Substitute EvolvableHMMPlayer as a regular HMMPlayer. @@ -594,7 +612,10 @@ class TestEvolvableGambler4(TestEvolvablePlayer): axl.EvolvableGambler, pattern=tables[("PSO Gambler 2_2_2", 2, 2, 2)], parameters=(2, 2, 2), - initial_actions=(C, C,) + initial_actions=( + C, + C, + ), ) diff --git a/axelrod/tests/strategies/test_geller.py b/axelrod/tests/strategies/test_geller.py index 38491fe08..0cb60d7c0 100644 --- a/axelrod/tests/strategies/test_geller.py +++ b/axelrod/tests/strategies/test_geller.py @@ -43,7 +43,9 @@ def test_strategy(self): """Should cooperate against cooperators and defect against defectors.""" self.versus_test(axl.Defector(), expected_actions=[(D, D)] * 5) self.versus_test(axl.Cooperator(), expected_actions=[(C, C)] * 5) - self.versus_test(axl.Alternator(), expected_actions=[(C, C), (D, D)] * 5) + self.versus_test( + axl.Alternator(), expected_actions=[(C, C), (D, D)] * 5 + ) def test_strategy_against_lookerup_players(self): """ @@ -64,7 +66,9 @@ def test_returns_foil_inspection_strategy_of_opponent(self): seed=2, ) - self.versus_test(axl.Darwin(), expected_actions=[(C, C), (C, C), (C, C)], seed=3) + self.versus_test( + axl.Darwin(), expected_actions=[(C, C), (C, C), (C, C)], seed=3 + ) self.versus_test( axl.MindReader(), expected_actions=[(D, D), (D, D), (D, D)], seed=1 @@ -91,10 +95,13 @@ def test_foil_strategy_inspection(self): def test_returns_foil_inspection_strategy_of_opponent(self): self.versus_test( - axl.GellerDefector(), expected_actions=[(D, C), (D, C), (D, C), (D, C)] + axl.GellerDefector(), + expected_actions=[(D, C), (D, C), (D, C), (D, C)], ) - self.versus_test(axl.Darwin(), expected_actions=[(C, C), (C, C), (C, C)]) + self.versus_test( + axl.Darwin(), expected_actions=[(C, C), (C, C), (C, C)] + ) self.versus_test( axl.MindReader(), expected_actions=[(D, D), (D, D), (D, D)] @@ -122,10 +129,13 @@ def test_foil_strategy_inspection(self): def test_returns_foil_inspection_strategy_of_opponent(self): self.versus_test( - axl.GellerDefector(), expected_actions=[(D, D), (D, D), (D, D), (D, D)] + axl.GellerDefector(), + expected_actions=[(D, D), (D, D), (D, D), (D, D)], ) - self.versus_test(axl.Darwin(), expected_actions=[(C, C), (C, C), (C, C)]) + self.versus_test( + axl.Darwin(), expected_actions=[(C, C), (C, C), (C, C)] + ) self.versus_test( axl.MindReader(), expected_actions=[(D, D), (D, D), (D, D)] diff --git a/axelrod/tests/strategies/test_gobymajority.py b/axelrod/tests/strategies/test_gobymajority.py index 6cd553880..88c5c555a 100644 --- a/axelrod/tests/strategies/test_gobymajority.py +++ 
b/axelrod/tests/strategies/test_gobymajority.py @@ -38,7 +38,9 @@ def test_memory_depth_infinite_soft_is_false(self): + [(C, C)] ) opponent = axl.MockPlayer(actions=opponent_actions) - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + self.versus_test( + opponent, expected_actions=actions, init_kwargs=init_kwargs + ) def test_memory_depth_even_soft_is_false(self): memory_depth = 4 @@ -46,7 +48,9 @@ def test_memory_depth_even_soft_is_false(self): if self.default_soft: init_kwargs["soft"] = False - opponent = axl.MockPlayer(actions=[C] * memory_depth + [D] * memory_depth) + opponent = axl.MockPlayer( + actions=[C] * memory_depth + [D] * memory_depth + ) actions = ( [(D, C)] + [(C, C)] * 3 @@ -55,7 +59,9 @@ def test_memory_depth_even_soft_is_false(self): + [(D, C)] * 3 + [(C, C)] ) - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + self.versus_test( + opponent, expected_actions=actions, init_kwargs=init_kwargs + ) def test_memory_depth_odd(self): memory_depth = 5 @@ -64,7 +70,9 @@ def test_memory_depth_odd(self): first_action = [(C, C)] else: first_action = [(D, C)] - opponent = axl.MockPlayer(actions=[C] * memory_depth + [D] * memory_depth) + opponent = axl.MockPlayer( + actions=[C] * memory_depth + [D] * memory_depth + ) actions = ( first_action + [(C, C)] * 4 @@ -73,7 +81,9 @@ def test_memory_depth_odd(self): + [(D, C)] * 3 + [(C, C)] * 2 ) - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + self.versus_test( + opponent, expected_actions=actions, init_kwargs=init_kwargs + ) def test_default_values(self): player = self.player() @@ -90,7 +100,11 @@ class TestGoByMajority(TestHardGoByMajority): def test_memory_depth_infinite_soft_is_true(self): opponent_actions = [C] * 50 + [D] * 100 + [C] * 52 actions = ( - [(C, C)] * 50 + [(C, D)] * 51 + [(D, D)] * 49 + [(D, C)] * 50 + [(C, C)] * 2 + [(C, C)] * 50 + + [(C, D)] * 51 + + [(D, D)] * 49 + + [(D, C)] * 50 + + [(C, C)] * 2 ) opponent = axl.MockPlayer(actions=opponent_actions) self.versus_test(opponent, expected_actions=actions) @@ -100,8 +114,12 @@ def test_memory_depth_even_soft_is_true(self): init_kwargs = {"memory_depth": memory_depth} opponent = axl.MockPlayer([C] * memory_depth + [D] * memory_depth) - actions = [(C, C)] * 4 + [(C, D)] * 3 + [(D, D)] + [(D, C)] * 2 + [(C, C)] * 2 - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + actions = ( + [(C, C)] * 4 + [(C, D)] * 3 + [(D, D)] + [(D, C)] * 2 + [(C, C)] * 2 + ) + self.versus_test( + opponent, expected_actions=actions, init_kwargs=init_kwargs + ) def test_name(self): player = self.player(soft=True) @@ -161,7 +179,9 @@ def test_strategy(self): else: cooperations = int(memory_depth * 1.5) - 1 defections = len(opponent_actions) - cooperations - 1 - player_actions = first_player_action + [C] * cooperations + [D] * defections + player_actions = ( + first_player_action + [C] * cooperations + [D] * defections + ) actions = list(zip(player_actions, opponent_actions)) self.versus_test(opponent, expected_actions=actions) diff --git a/axelrod/tests/strategies/test_grudger.py b/axelrod/tests/strategies/test_grudger.py index 9bd980b89..0f449cda6 100644 --- a/axelrod/tests/strategies/test_grudger.py +++ b/axelrod/tests/strategies/test_grudger.py @@ -258,11 +258,31 @@ def test_strategy(self): """Test strategy with multiple initial parameters""" # Testing default parameters of n=1, d=4, c=2 (same as Soft Grudger) - actions = [(C, D), (D, D), (D, C), (D, C), (D, D), (C, D), (C, C), (C, 
C)] - self.versus_test(axl.MockPlayer(actions=[D, D, C, C]), expected_actions=actions) + actions = [ + (C, D), + (D, D), + (D, C), + (D, C), + (D, D), + (C, D), + (C, C), + (C, C), + ] + self.versus_test( + axl.MockPlayer(actions=[D, D, C, C]), expected_actions=actions + ) # Testing n=2, d=4, c=2 - actions = [(C, D), (C, D), (D, C), (D, C), (D, D), (D, D), (C, C), (C, C)] + actions = [ + (C, D), + (C, D), + (D, C), + (D, C), + (D, D), + (D, D), + (C, C), + (C, C), + ] self.versus_test( axl.MockPlayer(actions=[D, D, C, C]), expected_actions=actions, @@ -270,7 +290,16 @@ def test_strategy(self): ) # Testing n=1, d=1, c=1 - actions = [(C, D), (D, D), (C, C), (C, C), (C, D), (D, D), (C, C), (C, C)] + actions = [ + (C, D), + (D, D), + (C, C), + (C, C), + (C, D), + (D, D), + (C, C), + (C, C), + ] self.versus_test( axl.MockPlayer(actions=[D, D, C, C]), expected_actions=actions, diff --git a/axelrod/tests/strategies/test_grumpy.py b/axelrod/tests/strategies/test_grumpy.py index 1fba6bbdd..fa0e5b327 100644 --- a/axelrod/tests/strategies/test_grumpy.py +++ b/axelrod/tests/strategies/test_grumpy.py @@ -46,32 +46,42 @@ def test_starting_state(self): actions = ([(C, D)] * 11 + [(D, C)] * 22 + [(C, D)] * 11) * 3 init_kwargs = {"starting_state": "Nice"} - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + self.versus_test( + opponent, expected_actions=actions, init_kwargs=init_kwargs + ) opponent = axl.MockPlayer(actions=opponent_actions) grumpy_starting = [(D, D)] * 11 + [(D, C)] * 22 + [(C, D)] * 11 actions = grumpy_starting + actions init_kwargs = {"starting_state": "Grumpy"} - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + self.versus_test( + opponent, expected_actions=actions, init_kwargs=init_kwargs + ) def test_thresholds(self): init_kwargs = {"grumpy_threshold": 3, "nice_threshold": -2} opponent_actions = [D] * 4 + [C] * 7 + [D] * 3 opponent = axl.MockPlayer(actions=opponent_actions) actions = ([(C, D)] * 4 + [(D, C)] * 7 + [(C, D)] * 3) * 3 - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + self.versus_test( + opponent, expected_actions=actions, init_kwargs=init_kwargs + ) init_kwargs = {"grumpy_threshold": 0, "nice_threshold": -2} opponent_actions = [D] * 1 + [C] * 4 + [D] * 3 opponent = axl.MockPlayer(actions=opponent_actions) actions = ([(C, D)] * 1 + [(D, C)] * 4 + [(C, D)] * 3) * 3 - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + self.versus_test( + opponent, expected_actions=actions, init_kwargs=init_kwargs + ) init_kwargs = {"grumpy_threshold": 3, "nice_threshold": 0} opponent_actions = [D] * 4 + [C] * 5 + [D] * 1 opponent = axl.MockPlayer(actions=opponent_actions) actions = ([(C, D)] * 4 + [(D, C)] * 5 + [(C, D)] * 1) * 3 - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + self.versus_test( + opponent, expected_actions=actions, init_kwargs=init_kwargs + ) def test_reset_state_with_non_default_init(self): player = axl.Grumpy(starting_state="Grumpy") diff --git a/axelrod/tests/strategies/test_hmm.py b/axelrod/tests/strategies/test_hmm.py index e60d4e3dd..beffae2c5 100644 --- a/axelrod/tests/strategies/test_hmm.py +++ b/axelrod/tests/strategies/test_hmm.py @@ -193,17 +193,23 @@ class TestEvolvedHMM5(TestPlayer): def test_strategy(self): actions = [(C, C), (C, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=2) + self.versus_test( + opponent=axl.Alternator(), 
expected_actions=actions, seed=2 + ) class TestEvolvedHMM5vsCooperator(TestMatch): def test_rounds(self): - self.versus_test(axl.EvolvedHMM5(), axl.Cooperator(), [C] * 5, [C] * 5, seed=3) + self.versus_test( + axl.EvolvedHMM5(), axl.Cooperator(), [C] * 5, [C] * 5, seed=3 + ) class TestEvolvedHMM5vsDefector(TestMatch): def test_rounds(self): - self.versus_test(axl.EvolvedHMM5(), axl.Defector(), [C, C, D], [D, D, D], seed=5) + self.versus_test( + axl.EvolvedHMM5(), axl.Defector(), [C, C, D], [D, D, D], seed=5 + ) class TestEvolvableHMMPlayer(unittest.TestCase): @@ -218,9 +224,7 @@ def test_normalized_parameters(self): initial_action = C self.assertRaises( - InsufficientParametersError, - self.player_class, - seed=1 + InsufficientParametersError, self.player_class, seed=1 ) self.assertRaises( InsufficientParametersError, @@ -229,7 +233,7 @@ def test_normalized_parameters(self): transitions_D=transitions_D, emission_probabilities=emission_probabilities, initial_state=None, - seed=1 + seed=1, ) self.assertRaises( InsufficientParametersError, @@ -238,14 +242,14 @@ def test_normalized_parameters(self): transitions_D=transitions_D, emission_probabilities=emission_probabilities, initial_action=None, - seed=1 + seed=1, ) self.assertRaises( InsufficientParametersError, self.player_class, initial_state=initial_state, initial_action=initial_action, - seed=1 + seed=1, ) def test_vector_to_instance(self): diff --git a/axelrod/tests/strategies/test_human.py b/axelrod/tests/strategies/test_human.py index e0e2ca811..ffe35f5d0 100644 --- a/axelrod/tests/strategies/test_human.py +++ b/axelrod/tests/strategies/test_human.py @@ -27,7 +27,9 @@ def test_validator(self): ActionValidator().validate(test_document) test_document = TestDocument("E") - self.assertRaises(ValidationError, ActionValidator().validate, test_document) + self.assertRaises( + ValidationError, ActionValidator().validate, test_document + ) class TestHumanClass(TestPlayer): @@ -70,8 +72,8 @@ def test_status_messages(self): self.assertEqual(actual_messages, expected_messages) human.history.append(C, C) - expected_print_message = "{}Turn 1: human played C, opponent played C".format( - linesep + expected_print_message = ( + "{}Turn 1: human played C, opponent played C".format(linesep) ) actual_messages = human._status_messages() self.assertEqual(actual_messages["print"], expected_print_message) diff --git a/axelrod/tests/strategies/test_inverse.py b/axelrod/tests/strategies/test_inverse.py index 95ac95033..3d89e11dd 100644 --- a/axelrod/tests/strategies/test_inverse.py +++ b/axelrod/tests/strategies/test_inverse.py @@ -28,10 +28,22 @@ def test_strategy(self): # Tests that if opponent has played all D then player chooses D, # regardless of the random seed. 
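The seeded expectations in these tests work because a Match constructed with a fixed seed is fully reproducible, even for stochastic players; a minimal demonstration:

import axelrod as axl

first = axl.Match((axl.Random(), axl.TitForTat()), turns=5, seed=7).play()
# Rebuilding the match with the same seed makes the stochastic player
# draw the same random choices, so the play-by-play result is identical.
second = axl.Match((axl.Random(), axl.TitForTat()), turns=5, seed=7).play()
assert first == second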
- self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 9, seed=None) + self.versus_test( + axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 9, seed=None + ) - expected_actions = [(C, D), (D, C), (D, C), (C, D), (D, C), (C, C), - (D, C), (C, C), (C, D), (D, D)] + expected_actions = [ + (C, D), + (D, C), + (D, C), + (C, D), + (D, C), + (C, C), + (D, C), + (C, C), + (C, D), + (D, D), + ] self.versus_test( axl.MockPlayer(actions=[a[1] for a in expected_actions]), expected_actions=expected_actions, diff --git a/axelrod/tests/strategies/test_lookerup.py b/axelrod/tests/strategies/test_lookerup.py index c48474388..08dc27ab3 100755 --- a/axelrod/tests/strategies/test_lookerup.py +++ b/axelrod/tests/strategies/test_lookerup.py @@ -20,6 +20,7 @@ C, D = axl.Action.C, axl.Action.D random = axl.RandomGenerator() + class TestLookupTable(unittest.TestCase): lookup_dict = { ((C, C), (C,), ()): C, @@ -69,7 +70,9 @@ def test_from_pattern(self): table = LookupTable.from_pattern( pattern, player_depth=2, op_depth=1, op_openings_depth=0 ) - self.assertEqual(table.dictionary, make_keys_into_plays(self.lookup_dict)) + self.assertEqual( + table.dictionary, make_keys_into_plays(self.lookup_dict) + ) def test_from_pattern_raises_error_pattern_len_ne_dict_size(self): too_big = (C,) * 17 @@ -151,7 +154,9 @@ def test_plays_equals_tuple(self): self.assertEqual(Plays(1, 2, 3), (1, 2, 3)) def test_plays_assign_values(self): - self.assertEqual(Plays(op_plays=2, self_plays=1, op_openings=3), Plays(1, 2, 3)) + self.assertEqual( + Plays(op_plays=2, self_plays=1, op_openings=3), Plays(1, 2, 3) + ) def test_make_keys_into_plays(self): old = {((C, D), (C,), ()): 1, ((D, D), (D,), ()): 2} @@ -307,7 +312,9 @@ def test_set_memory_depth(self): self.assertEqual(axl.Classifiers["memory_depth"](mem_depth_3), 3) mem_depth_inf = axl.LookerUp(pattern="CC", parameters=Plays(0, 0, 1)) - self.assertEqual(axl.Classifiers["memory_depth"](mem_depth_inf), float("inf")) + self.assertEqual( + axl.Classifiers["memory_depth"](mem_depth_inf), float("inf") + ) def test_strategy(self): actions = [(C, C), (C, D), (D, C), (C, D)] @@ -375,10 +382,13 @@ def test_opponent_starting_moves_table(self): def test_lookup_table_display(self): player = axl.LookerUp( - pattern="CCCC", parameters=Plays(self_plays=2, op_plays=0, op_openings=0) + pattern="CCCC", + parameters=Plays(self_plays=2, op_plays=0, op_openings=0), ) self.assertEqual( - player.lookup_table_display(("self_plays", "op_plays", "op_openings")), + player.lookup_table_display( + ("self_plays", "op_plays", "op_openings") + ), ( "self_plays | op_plays |op_openings\n" + " C, C , , : C,\n" @@ -420,12 +430,16 @@ def test_new_data(self): def test_vs_initial_defector(self): opponent = [D, C, C, D, D, C] expected = [(C, D), (D, C), (C, C), (D, D), (D, D), (D, C)] - self.versus_test(axl.MockPlayer(actions=opponent), expected_actions=expected) + self.versus_test( + axl.MockPlayer(actions=opponent), expected_actions=expected + ) def test_vs_initial_cooperator(self): opponent = [C, D, D, C, C, D] expected = [(C, C), (C, D), (D, D), (D, C), (D, C), (D, D)] - self.versus_test(axl.MockPlayer(actions=opponent), expected_actions=expected) + self.versus_test( + axl.MockPlayer(actions=opponent), expected_actions=expected + ) class TestEvolvedLookerUp2_2_2(TestPlayer): @@ -521,7 +535,14 @@ def test_vs_initial_defector(self): def test_vs_initial_d_c(self): opponent_actions = [D, C] + [C, D] * 3 - expected = [(C, D), (C, C)] + [(D, C), (C, D), (C, C), (D, D), (C, C), (C, D)] + expected = [(C, D), 
(C, C)] + [ + (D, C), + (C, D), + (C, C), + (D, D), + (C, C), + (C, D), + ] self.versus_test( axl.MockPlayer(actions=opponent_actions), expected_actions=expected ) @@ -604,7 +625,9 @@ def test_strategy(self): vs_alternator = [(D, C), (C, D)] + [(D, C), (D, D)] * 5 self.versus_test(axl.Alternator(), expected_actions=vs_alternator) - self.versus_test(axl.Cooperator(), expected_actions=[(D, C)] + [(C, C)] * 10) + self.versus_test( + axl.Cooperator(), expected_actions=[(D, C)] + [(C, C)] * 10 + ) self.versus_test( axl.Defector(), expected_actions=([(D, D), (C, D)] + [(D, D)] * 10) @@ -616,7 +639,11 @@ def test_convert_key(self): opponent_starting_plays = "" player_last_plays = "CC" opponent_last_plays = "D" - old_key = (opponent_starting_plays, player_last_plays, opponent_last_plays) + old_key = ( + opponent_starting_plays, + player_last_plays, + opponent_last_plays, + ) new_key = Plays(self_plays=(C, C), op_plays=(D,), op_openings=()) @@ -665,22 +692,20 @@ def test_normalized_parameters(self): pattern = ("".join([random.choice(("C", "D")) for _ in range(8)]),) self.assertRaises( - InsufficientParametersError, - self.player_class, - seed=1 + InsufficientParametersError, self.player_class, seed=1 ) self.assertRaises( InsufficientParametersError, self.player_class, pattern=pattern, initial_actions=initial_actions, - seed=1 + seed=1, ) self.assertRaises( InsufficientParametersError, self.player_class, lookup_dict=lookup_dict, - seed=1 + seed=1, ) @@ -708,7 +733,10 @@ class TestEvolvableLookerUp4(TestEvolvablePlayer): init_parameters = { "parameters": (2, 2, 2), "pattern": "".join([random.choice(("C", "D")) for _ in range(64)]), - "initial_actions": (C, C,), + "initial_actions": ( + C, + C, + ), } @@ -718,7 +746,10 @@ class TestEvolvableLookerUp5(TestEvolvablePlayer): parent_class = axl.LookerUp parent_kwargs = ["lookup_dict", "initial_actions"] init_parameters = { - "initial_actions": (C, C,), + "initial_actions": ( + C, + C, + ), "lookup_dict": { ((C, C), (C,), ()): C, ((C, C), (D,), ()): D, diff --git a/axelrod/tests/strategies/test_memoryone.py b/axelrod/tests/strategies/test_memoryone.py index 01e720618..f1e6513e9 100644 --- a/axelrod/tests/strategies/test_memoryone.py +++ b/axelrod/tests/strategies/test_memoryone.py @@ -69,11 +69,15 @@ class TestGTFT(TestPlayer): def test_strategy(self): actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=2) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=2 + ) def test_strategy2(self): actions = [(C, C), (C, D), (C, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=1 + ) def test_four_vector(self): (R, P, S, T) = axl.Game().RPST() @@ -111,11 +115,15 @@ def test_strategy(self): def test_strategy2(self): actions = [(C, D), (D, D), (D, D), (D, D), (C, D)] - self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=10) + self.versus_test( + opponent=axl.Defector(), expected_actions=actions, seed=10 + ) def test_strategy3(self): actions = [(C, D), (D, D), (C, D), (D, D), (D, D)] - self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=1) + self.versus_test( + opponent=axl.Defector(), expected_actions=actions, seed=1 + ) class TestStochasticCooperator(TestPlayer): @@ -143,19 +151,27 @@ def test_four_vector(self): def test_strategy(self): actions = [(C, C), (D, D), (C, C), (C, D), (C, C), (D, D)] - 
self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=113) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=113 + ) def test_strategy2(self): actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=1 + ) def test_strategy3(self): actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (D, D)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=5) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=5 + ) def test_strategy4(self): actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (C, D)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=12) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=12 + ) class TestStochasticWSLS(TestPlayer): @@ -174,19 +190,27 @@ class TestStochasticWSLS(TestPlayer): def test_strategy(self): actions = [(C, C), (D, D), (C, C), (C, D), (D, C), (D, D)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=50) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=50 + ) def test_strategy2(self): actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=1 + ) def test_strategy3(self): actions = [(C, D), (D, C), (D, D), (C, C), (C, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=2) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=2 + ) def test_strategy4(self): actions = [(C, D), (C, C), (C, D), (D, C), (D, D), (C, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=23) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=23 + ) def test_four_vector(self): player = self.player() @@ -204,7 +228,8 @@ class TestMemoryOnePlayer(unittest.TestCase): def test_default_if_four_vector_not_set(self): player = MemoryOnePlayer() self.assertEqual( - player._four_vector, {(C, C): 1.0, (C, D): 0.0, (D, C): 0.0, (D, D): 1.0} + player._four_vector, + {(C, C): 1.0, (C, D): 0.0, (D, C): 0.0, (D, D): 1.0}, ) def test_exception_if_four_vector_not_set(self): @@ -246,11 +271,15 @@ def test_four_vector(self): def test_strategy(self): actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=2) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=2 + ) def test_strategy2(self): actions = [(C, D), (D, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=5) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=5 + ) class TestALLCorALLD(TestPlayer): @@ -269,11 +298,15 @@ class TestALLCorALLD(TestPlayer): def test_strategy(self): actions = [(D, C)] * 10 - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions, seed=0) + self.versus_test( + opponent=axl.Cooperator(), expected_actions=actions, seed=0 + ) def test_strategy2(self): actions = [(C, C)] * 10 - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions, seed=1) + self.versus_test( + opponent=axl.Cooperator(), expected_actions=actions, seed=1 + ) class 
TestGenericReactiveStrategy(unittest.TestCase): @@ -287,13 +320,16 @@ class TestGenericReactiveStrategy(unittest.TestCase): def test_four_vector(self): self.assertEqual( - self.p1._four_vector, {(C, D): 0.0, (D, C): 0.0, (C, C): 0.0, (D, D): 0.0} + self.p1._four_vector, + {(C, D): 0.0, (D, C): 0.0, (C, C): 0.0, (D, D): 0.0}, ) self.assertEqual( - self.p2._four_vector, {(C, D): 0.0, (D, C): 1.0, (C, C): 1.0, (D, D): 0.0} + self.p2._four_vector, + {(C, D): 0.0, (D, C): 1.0, (C, C): 1.0, (D, D): 0.0}, ) self.assertEqual( - self.p3._four_vector, {(C, D): 0.5, (D, C): 1.0, (C, C): 1.0, (D, D): 0.5} + self.p3._four_vector, + {(C, D): 0.5, (D, C): 1.0, (C, C): 1.0, (D, D): 0.5}, ) def test_subclass(self): @@ -304,6 +340,7 @@ def test_subclass(self): class TestMemoryOneAlternator(TestAlternator): """Alternator is equivalent to MemoryOnePlayer((0, 0, 1, 1), C)""" + name = "Generic Memory One Player: (0, 0, 1, 1), C" player = lambda x: axl.MemoryOnePlayer(four_vector=(0, 0, 1, 1)) expected_classifier = { @@ -319,6 +356,7 @@ class TestMemoryOneAlternator(TestAlternator): class TestMemoryOneCooperator(TestCooperator): """Cooperator is equivalent to MemoryOnePlayer((1, 1, 1, 1), C)""" + name = "Generic Memory One Player: (1, 1, 1, 1), C" player = lambda x: axl.MemoryOnePlayer(four_vector=(1, 1, 1, 1)) expected_classifier = { @@ -334,6 +372,7 @@ class TestMemoryOneCooperator(TestCooperator): class TestMemoryOneDefector(TestDefector): """Defector is equivalent to MemoryOnePlayer((0, 0, 0, 0), D)""" + name = "Generic Memory One Player: (0, 0, 0, 0), D" player = lambda x: axl.MemoryOnePlayer(four_vector=(0, 0, 0, 0), initial=D) expected_classifier = { @@ -349,6 +388,7 @@ class TestMemoryOneDefector(TestDefector): class TestMemoryOneTitForTat(TestTitForTat): """TitForTat is equivalent to MemoryOnePlayer((1, 0, 1, 0), C)""" + name = "Generic Memory One Player: (1, 0, 1, 0), C" player = lambda x: axl.MemoryOnePlayer(four_vector=(1, 0, 1, 0)) expected_classifier = { @@ -364,6 +404,7 @@ class TestMemoryOneTitForTat(TestTitForTat): class TestMemoryOneWSLS(TestWinStayLoseShift): """WinStayLoseShift is equivalent to MemoryOnePlayer((1, 0, 0, 1), C)""" + name = "Generic Memory One Player: (1, 0, 0, 1), C" player = lambda x: axl.MemoryOnePlayer(four_vector=(1, 0, 0, 1)) expected_classifier = { diff --git a/axelrod/tests/strategies/test_memorytwo.py b/axelrod/tests/strategies/test_memorytwo.py index 339aef728..ee5265eac 100644 --- a/axelrod/tests/strategies/test_memorytwo.py +++ b/axelrod/tests/strategies/test_memorytwo.py @@ -80,9 +80,7 @@ def test_exception_if_probability_vector_outside_valid_values(self): class TestMemoryStochastic(TestPlayer): - name = ( - "Generic Memory Two Player" - ) + name = "Generic Memory Two Player" player = axl.MemoryTwoPlayer expected_classifier = { "memory_depth": 0, # Memory-two Sixteen-Vector @@ -156,7 +154,9 @@ def test_strategy(self): # tests states 3, 5 and 12 actions = [(C, D), (C, C), (D, C), (D, D), (D, D), (C, D)] - self.versus_test(opponent=axl.SuspiciousTitForTat(), expected_actions=actions) + self.versus_test( + opponent=axl.SuspiciousTitForTat(), expected_actions=actions + ) # tests state 1 actions = [(C, C), (C, C), (C, C), (C, C)] @@ -189,12 +189,15 @@ def test_strategy_mutually_cooperative(self): # tests states 1, 4 and 8 actions = [(C, D), (C, D), (D, D), (C, C), (C, C), (C, D)] self.versus_test( - opponent=axl.Cycler(["D", "D", "D", "C", "C"]), expected_actions=actions + opponent=axl.Cycler(["D", "D", "D", "C", "C"]), + expected_actions=actions, ) # tests states 3, 5 
         actions = [(C, D), (C, C), (D, C), (D, D), (C, D)]
-        self.versus_test(opponent=axl.SuspiciousTitForTat(), expected_actions=actions)
+        self.versus_test(
+            opponent=axl.SuspiciousTitForTat(), expected_actions=actions
+        )
 
 
 class TestMEM2(TestPlayer):
@@ -256,8 +259,11 @@ def test_strategy(self):
 
 class TestMemoryTwoCooperator(TestCooperator):
     """Cooperator is equivalent to MemoryTwoPlayer((1, 1, ..., 1), C)"""
+
     name = "Generic Memory Two Player: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], (C, C)"
-    player = lambda x: axl.MemoryTwoPlayer(sixteen_vector=[1] * 16, initial=(C, C))
+    player = lambda x: axl.MemoryTwoPlayer(
+        sixteen_vector=[1] * 16, initial=(C, C)
+    )
     expected_classifier = {
         "memory_depth": 0,
         "stochastic": False,
@@ -271,8 +277,11 @@ class TestMemoryTwoDefector(TestDefector):
     """Defector is equivalent to MemoryTwoPlayer((0, 0, ..., 0), D)"""
+
     name = "Generic Memory Two Player: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], (D, D)"
-    player = lambda x: axl.MemoryTwoPlayer(sixteen_vector=[0] * 16, initial=(D, D))
+    player = lambda x: axl.MemoryTwoPlayer(
+        sixteen_vector=[0] * 16, initial=(D, D)
+    )
     expected_classifier = {
         "memory_depth": 0,
         "stochastic": False,
@@ -292,8 +301,12 @@ def four_vector_to_sixteen_vector(four_vector):
 
 class TestMemoryTwoAlternator(TestAlternator):
     """Alternator is equivalent to MemoryTwoPlayer((0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1), (C, D))."""
+
     name = "Generic Memory Two Player: [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], (C, D)"
-    player = lambda x: axl.MemoryTwoPlayer(sixteen_vector=four_vector_to_sixteen_vector((0, 0, 1, 1)), initial=(C, D))
+    player = lambda x: axl.MemoryTwoPlayer(
+        sixteen_vector=four_vector_to_sixteen_vector((0, 0, 1, 1)),
+        initial=(C, D),
+    )
     expected_classifier = {
         "memory_depth": 1,
         "stochastic": False,
diff --git a/axelrod/tests/strategies/test_meta.py b/axelrod/tests/strategies/test_meta.py
index 971dae521..1920c5f84 100644
--- a/axelrod/tests/strategies/test_meta.py
+++ b/axelrod/tests/strategies/test_meta.py
@@ -2,6 +2,7 @@
 
 import axelrod as axl
 from axelrod.classifier import Classifiers
+from axelrod.tests.property import strategy_lists
 from hypothesis import given, settings
 from hypothesis.strategies import integers
 
@@ -41,7 +42,9 @@ def classifier_test(self, expected_class_classifier=None):
 
         for t in player.team:
             try:
-                classifier["makes_use_of"].update(axl.Classifiers["make_use_of"](t))
+                classifier["makes_use_of"].update(
+                    axl.Classifiers["make_use_of"](t)
+                )
             except KeyError:
                 pass
 
@@ -94,6 +97,27 @@ def test_update_histories(self):
         with self.assertRaises(TypeError):
             p.update_histories(C)
 
+    @settings(max_examples=5, deadline=None)
+    @given(opponent_list=strategy_lists(max_size=1))
+    def test_players_return_valid_actions(self, opponent_list):
+        """
+        Whenever a new strategy is added to the library, this potentially
+        modifies the behaviour of the meta strategies, which in turn
+        requires modification of the tests.
+
+        In https://github.com/Axelrod-Python/Axelrod/pull/1373 specific
+        behaviour tests for the meta strategies were removed.
+
+        This test ensures that a valid example is always returned by
+        checking that the actions played are a subset of {C, D}.
+        """
+        player = self.player()
+        opponent = opponent_list[0]()
+        match = axl.Match(players=(player, opponent))
+        interactions = match.play()
+        player_actions = set(player_action for player_action, _ in interactions)
+        self.assertTrue(player_actions <= {C, D})
+
 
 class TestMetaMajority(TestMetaPlayer):
     name = "Meta Majority"
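The invariant that the new test_players_return_valid_actions test enforces can also be checked outside the test suite. A minimal standalone sketch (MetaMajority and TitForTat are illustrative choices here; any meta strategy and opponent from the library should satisfy the same property):

import axelrod as axl

C, D = axl.Action.C, axl.Action.D

# Play a short match and confirm that the meta player only ever emits
# valid actions (C or D), whichever team members it consults internally.
player = axl.MetaMajority()
match = axl.Match(players=(player, axl.TitForTat()), turns=20)
interactions = match.play()
assert {action for action, _ in interactions} <= {C, D}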
+ """ + player = self.player() + opponent = opponent_list[0]() + match = axl.Match(players=(player, opponent)) + interactions = match.play() + player_actions = set(player_action for player_action, _ in interactions) + self.assertTrue(player_actions <= {C, D}) + class TestMetaMajority(TestMetaPlayer): name = "Meta Majority" @@ -250,7 +274,6 @@ class TestNiceMetaWinnerEnsemble(TestMetaPlayer): "manipulates_state": False, } - def test_strategy(self): actions = [(C, C)] * 8 self.versus_test( @@ -370,7 +393,9 @@ def test_strategy(self): opponent = axl.MockPlayer([C] * 100 + [D]) actions = [(C, C)] * 100 + [(C, D), (D, C)] self.versus_test( - opponent=opponent, expected_actions=actions, init_kwargs={"team": team} + opponent=opponent, + expected_actions=actions, + init_kwargs={"team": team}, ) @@ -382,14 +407,16 @@ class TestMetaMajorityMemoryOne(TestMetaPlayer): "stochastic": True, "inspects_source": False, "long_run_time": False, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "manipulates_source": False, "manipulates_state": False, } def test_strategy(self): actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=1 + ) class TestMetaMajorityFiniteMemory(TestMetaPlayer): @@ -407,7 +434,9 @@ class TestMetaMajorityFiniteMemory(TestMetaPlayer): def test_strategy(self): actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=2) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=2 + ) class TestMetaMajorityLongMemory(TestMetaPlayer): @@ -425,11 +454,15 @@ class TestMetaMajorityLongMemory(TestMetaPlayer): def test_strategy(self): actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=0) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=0 + ) def test_strategy2(self): actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=1 + ) class TestMetaWinnerMemoryOne(TestMetaPlayer): @@ -438,7 +471,7 @@ class TestMetaWinnerMemoryOne(TestMetaPlayer): expected_classifier = { "memory_depth": float("inf"), # Long memory "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -447,7 +480,9 @@ class TestMetaWinnerMemoryOne(TestMetaPlayer): def test_strategy(self): actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=1 + ) class TestMetaWinnerFiniteMemory(TestMetaPlayer): @@ -465,7 +500,9 @@ class TestMetaWinnerFiniteMemory(TestMetaPlayer): def test_strategy(self): actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1) + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=1 + ) class TestMetaWinnerLongMemory(TestMetaPlayer): @@ -483,7 +520,9 @@ class TestMetaWinnerLongMemory(TestMetaPlayer): def test_strategy(self): actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, 
+        self.versus_test(
+            opponent=axl.Alternator(), expected_actions=actions, seed=4
+        )
 
 
 class TestMetaWinnerDeterministic(TestMetaPlayer):
@@ -519,7 +558,9 @@ class TestMetaWinnerStochastic(TestMetaPlayer):
 
     def test_strategy(self):
         actions = [(C, C), (C, D), (C, C), (D, D), (D, C)]
-        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1)
+        self.versus_test(
+            opponent=axl.Alternator(), expected_actions=actions, seed=1
+        )
 
 
 class TestMetaMixer(TestMetaPlayer):
@@ -595,7 +636,12 @@ def test_strategy(self):
         )
 
         team.append(axl.Defector)
-        distribution = [0.2, 0.5, 0.3, 0]  # If add a defector but does not occur
+        distribution = [
+            0.2,
+            0.5,
+            0.3,
+            0,
+        ]  # A Defector is added to the team, but with weight 0 it is never chosen
         self.versus_test(
             opponent=axl.Cooperator(),
             expected_actions=actions,
@@ -639,7 +685,9 @@ def classifier_test(self, expected_class_classifier=None):
 
     def test_strategy(self):
         actions = [(C, C), (C, D), (C, C), (D, D), (D, C)]
-        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=11)
+        self.versus_test(
+            opponent=axl.Alternator(), expected_actions=actions, seed=11
+        )
 
 
 class TestNMWEStochastic(TestMetaPlayer):
@@ -657,7 +705,9 @@ class TestNMWEStochastic(TestMetaPlayer):
 
     def test_strategy(self):
         actions = [(C, C), (C, D), (D, C), (C, D), (D, C)]
-        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=16)
+        self.versus_test(
+            opponent=axl.Alternator(), expected_actions=actions, seed=16
+        )
 
 
 class TestNMWEFiniteMemory(TestMetaPlayer):
@@ -675,7 +725,9 @@ class TestNMWEFiniteMemory(TestMetaPlayer):
 
     def test_strategy(self):
         actions = [(C, C), (C, D), (D, C), (D, D), (D, C)]
-        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=7)
+        self.versus_test(
+            opponent=axl.Alternator(), expected_actions=actions, seed=7
+        )
 
 
 class TestNMWELongMemory(TestMetaPlayer):
@@ -693,7 +745,9 @@ class TestNMWELongMemory(TestMetaPlayer):
 
     def test_strategy(self):
         actions = [(C, C), (C, D), (C, C), (D, D), (D, C)]
-        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=3)
+        self.versus_test(
+            opponent=axl.Alternator(), expected_actions=actions, seed=3
+        )
 
 
 class TestNMWEMemoryOne(TestMetaPlayer):
@@ -711,7 +765,9 @@ class TestNMWEMemoryOne(TestMetaPlayer):
 
     def test_strategy(self):
         actions = [(C, C), (C, D), (C, C), (D, D), (D, C)]
-        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=2)
+        self.versus_test(
+            opponent=axl.Alternator(), expected_actions=actions, seed=2
+        )
 
 
 class TestMemoryDecay(TestPlayer):
@@ -820,5 +876,5 @@ def test_memory_alter_delete(self):
             opponent,
             expected_actions=actions,
             init_kwargs={"start_strategy": axl.Cooperator},
-            seed=11
+            seed=11,
         )
diff --git a/axelrod/tests/strategies/test_negation.py b/axelrod/tests/strategies/test_negation.py
index 8c7542aaa..f74211b46 100644
--- a/axelrod/tests/strategies/test_negation.py
+++ b/axelrod/tests/strategies/test_negation.py
@@ -36,4 +36,6 @@ def test_strategy(self):
             opponent=axl.Cooperator(), expected_actions=actions, seed=1
         )
         actions = [(D, D), (C, D), (C, D)]
-        self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=2)
+        self.versus_test(
+            opponent=axl.Defector(), expected_actions=actions, seed=2
+        )
diff --git a/axelrod/tests/strategies/test_player.py b/axelrod/tests/strategies/test_player.py
index ec9fe051c..76c06e945 100644
--- a/axelrod/tests/strategies/test_player.py
+++ b/axelrod/tests/strategies/test_player.py
@@ -458,14 +458,22 @@ def test_reset_clone(self):
         clone = player.clone()
self.assertEqual(player, clone) - @given(seed=integers(min_value=1, max_value=20000000), - turns=integers(min_value=5, max_value=10), - noise=integers(min_value=0, max_value=10)) + @given( + seed=integers(min_value=1, max_value=20000000), + turns=integers(min_value=5, max_value=10), + noise=integers(min_value=0, max_value=10), + ) @settings(max_examples=1, deadline=None) def test_clone_reproducible_play(self, seed, turns, noise): # Test that the cloned player produces identical play player = self.player() - if player.name in ["Darwin", "Human", "Mind Bender", "Mind Controller", "Mind Warper"]: + if player.name in [ + "Darwin", + "Human", + "Mind Bender", + "Mind Controller", + "Mind Warper", + ]: # Known exceptions return @@ -479,8 +487,15 @@ def test_clone_reproducible_play(self, seed, turns, noise): player_clone = player.clone() op = op.clone() op_clone = op.clone() - m1 = axl.Match((player, op), turns=turns, seed=seed, noise=noise/100.) - m2 = axl.Match((player_clone, op_clone), turns=turns, seed=seed, noise=noise/100.) + m1 = axl.Match( + (player, op), turns=turns, seed=seed, noise=noise / 100.0 + ) + m2 = axl.Match( + (player_clone, op_clone), + turns=turns, + seed=seed, + noise=noise / 100.0, + ) m1.play() m2.play() self.assertEqual(m1.result, m2.result) @@ -577,7 +592,7 @@ def versus_test( noise=noise, seed=seed, attrs=attrs, - match_attributes=match_attributes + match_attributes=match_attributes, ) def classifier_test(self, expected_class_classifier=None): @@ -610,13 +625,14 @@ def classifier_test(self, expected_class_classifier=None): axl.Classifiers[key](player), self.expected_classifier[key], msg="%s - Behaviour: %s != Expected Behaviour: %s" - % ( - key, - axl.Classifiers[key](player), - self.expected_classifier[key], - ), + % ( + key, + axl.Classifiers[key](player), + self.expected_classifier[key], + ), ) + class TestMatch(unittest.TestCase): """Test class for heads up play between two given players. Plays an axelrod match between the two players.""" @@ -631,7 +647,7 @@ def versus_test( noise=None, seed=None, match_attributes=None, - attrs=None + attrs=None, ): """Tests a sequence of outcomes for two given players.""" if len(expected_actions1) != len(expected_actions2): @@ -640,14 +656,22 @@ def versus_test( turns = len(expected_actions1) match = axl.Match( - (player1, player2), turns=turns, noise=noise, seed=seed, - match_attributes=match_attributes) + (player1, player2), + turns=turns, + noise=noise, + seed=seed, + match_attributes=match_attributes, + ) match.play() # Test expected sequence of plays from the match is as expected. 
- for i, (play, expected_play) in enumerate(zip(player1.history, expected_actions1)): + for i, (play, expected_play) in enumerate( + zip(player1.history, expected_actions1) + ): self.assertEqual((i, play), (i, expected_play)) - for i, (play, expected_play) in enumerate(zip(player2.history, expected_actions2)): + for i, (play, expected_play) in enumerate( + zip(player2.history, expected_actions2) + ): self.assertEqual((i, play), (i, expected_play)) # Test final player attributes are as expected diff --git a/axelrod/tests/strategies/test_prober.py b/axelrod/tests/strategies/test_prober.py index 229b3fe1e..7f9e3dd48 100644 --- a/axelrod/tests/strategies/test_prober.py +++ b/axelrod/tests/strategies/test_prober.py @@ -194,7 +194,28 @@ class TestProber4(TestPlayer): "manipulates_source": False, "manipulates_state": False, } - initial_sequence = [C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D] + initial_sequence = [ + C, + C, + D, + C, + D, + D, + D, + C, + C, + D, + C, + D, + C, + C, + D, + C, + D, + D, + C, + D, + ] def test_strategy(self): # Starts by playing CCDCDDDCCDCDCCDCDDCD. @@ -214,7 +235,9 @@ def test_strategy(self): for history in provocative_histories: opponent = axl.MockPlayer(history + [C] * 5) actions = list(zip(self.initial_sequence, history)) + [(D, C)] * 5 - self.versus_test(opponent=opponent, expected_actions=actions, attrs=attrs) + self.versus_test( + opponent=opponent, expected_actions=actions, attrs=attrs + ) # Otherwise cooperates for 5 rounds and plays TfT afterwards unprovocative_histories = [ @@ -230,7 +253,9 @@ def test_strategy(self): opponent = axl.MockPlayer(history + [D] * 5 + [C, C]) actions = list(zip(self.initial_sequence, history)) + [(C, D)] * 5 actions += [(D, C), (C, C)] - self.versus_test(opponent=opponent, expected_actions=actions, attrs=attrs) + self.versus_test( + opponent=opponent, expected_actions=actions, attrs=attrs + ) class TestHardProber(TestPlayer): diff --git a/axelrod/tests/strategies/test_qlearner.py b/axelrod/tests/strategies/test_qlearner.py index 948c8c02e..07f3eb30e 100644 --- a/axelrod/tests/strategies/test_qlearner.py +++ b/axelrod/tests/strategies/test_qlearner.py @@ -14,7 +14,7 @@ class TestRiskyQLearner(TestPlayer): expected_classifier = { "memory_depth": float("inf"), "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -41,7 +41,13 @@ def test_strategy(self): "CC2.0": {C: 2.7, D: 0}, "CCC3.0": {C: 0, D: 0}, }, - "Vs": {"": 0.9, "0.0": 2.7, "C1.0": 4.5, "CC2.0": 2.7, "CCC3.0": 0}, + "Vs": { + "": 0.9, + "0.0": 2.7, + "C1.0": 4.5, + "CC2.0": 2.7, + "CCC3.0": 0, + }, "prev_state": "CCC3.0", }, ) @@ -54,7 +60,7 @@ class TestArrogantQLearner(TestPlayer): expected_classifier = { "memory_depth": float("inf"), # Long memory "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -75,7 +81,13 @@ def test_strategy(self): "CC2.0": {C: 2.7, D: 0}, "CCC3.0": {C: 0, D: 0}, }, - "Vs": {"": 0.9, "0.0": 2.7, "C1.0": 4.5, "CC2.0": 2.7, "CCC3.0": 0}, + "Vs": { + "": 0.9, + "0.0": 2.7, + "C1.0": 4.5, + "CC2.0": 2.7, + "CCC3.0": 0, + }, "prev_state": "CCC3.0", }, ) @@ -88,7 +100,7 @@ class TestHesitantQLearner(TestPlayer): expected_classifier = { "memory_depth": float("inf"), # Long memory "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, 
"manipulates_source": False, @@ -109,7 +121,13 @@ def test_strategy(self): "DD0.0": {C: 0, D: 0}, "DDD0.0": {C: 0, D: 0}, }, - "Vs": {"": 0.1, "0.0": 0.0, "D0.0": 0.1, "DD0.0": 0.0, "DDD0.0": 0}, + "Vs": { + "": 0.1, + "0.0": 0.0, + "D0.0": 0.1, + "DD0.0": 0.0, + "DDD0.0": 0, + }, "prev_state": "DDD0.0", }, ) @@ -122,7 +140,7 @@ class TestCautiousQLearner(TestPlayer): expected_classifier = { "memory_depth": float("inf"), # Long memory "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -143,7 +161,13 @@ def test_strategy(self): "DD0.0": {C: 0, D: 0}, "DDD0.0": {C: 0, D: 0}, }, - "Vs": {"": 0.1, "0.0": 0.0, "D0.0": 0.1, "DD0.0": 0.0, "DDD0.0": 0}, + "Vs": { + "": 0.1, + "0.0": 0.0, + "D0.0": 0.1, + "DD0.0": 0.0, + "DDD0.0": 0, + }, "prev_state": "DDD0.0", }, ) diff --git a/axelrod/tests/strategies/test_rand.py b/axelrod/tests/strategies/test_rand.py index c9341ab87..03754a169 100644 --- a/axelrod/tests/strategies/test_rand.py +++ b/axelrod/tests/strategies/test_rand.py @@ -23,10 +23,14 @@ class TestRandom(TestPlayer): def test_deterministic(self): actions = [(D, C), (D, C), (D, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions, init_kwargs={"p": 0}) + self.versus_test( + axl.Cooperator(), expected_actions=actions, init_kwargs={"p": 0} + ) actions = [(C, C), (C, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions, init_kwargs={"p": 1}) + self.versus_test( + axl.Cooperator(), expected_actions=actions, init_kwargs={"p": 1} + ) def test_stochastic_behavior1(self): """Test that strategy is randomly picked (not affected by history).""" diff --git a/axelrod/tests/strategies/test_retaliate.py b/axelrod/tests/strategies/test_retaliate.py index cd8660733..e3918f1ff 100644 --- a/axelrod/tests/strategies/test_retaliate.py +++ b/axelrod/tests/strategies/test_retaliate.py @@ -93,14 +93,18 @@ def test_strategy(self): opponent = axl.Cooperator() actions = [(C, C)] * 5 self.versus_test( - opponent=opponent, expected_actions=actions, attrs={"retaliating": False} + opponent=opponent, + expected_actions=actions, + attrs={"retaliating": False}, ) # Retaliate after a (C, D) round. 
opponent = axl.MockPlayer([C, C, C, D, C]) actions = [(C, C), (C, C), (C, C), (C, D), (D, C), (D, C)] self.versus_test( - opponent=opponent, expected_actions=actions, attrs={"retaliating": True} + opponent=opponent, + expected_actions=actions, + attrs={"retaliating": True}, ) opponent = axl.Alternator() @@ -108,7 +112,9 @@ def test_strategy(self): # Count retaliations actions = [(C, C), (C, D), (D, C), (D, D), (D, C)] self.versus_test( - opponent=opponent, expected_actions=actions, attrs={"retaliation_count": 3} + opponent=opponent, + expected_actions=actions, + attrs={"retaliation_count": 3}, ) opponent = axl.Alternator() diff --git a/axelrod/tests/strategies/test_revised_downing.py b/axelrod/tests/strategies/test_revised_downing.py index fa6897a85..8006e03ef 100644 --- a/axelrod/tests/strategies/test_revised_downing.py +++ b/axelrod/tests/strategies/test_revised_downing.py @@ -4,6 +4,7 @@ C, D = axl.Action.C, axl.Action.D + class TestRevisedDowning(TestPlayer): name = "Revised Downing" diff --git a/axelrod/tests/strategies/test_selfsteem.py b/axelrod/tests/strategies/test_selfsteem.py index d45730247..76b6e5db2 100644 --- a/axelrod/tests/strategies/test_selfsteem.py +++ b/axelrod/tests/strategies/test_selfsteem.py @@ -23,20 +23,33 @@ class TestSelfSteem(TestPlayer): def test_strategy1(self): # Check for f > 0.95, defect actions = ( - [(C, C), (C, C), (D, C), (D, C), (C, C), (D, C)] + [(C, C)] * 4 + [(D, C)] + [(C, C), (C, C), (D, C), (D, C), (C, C), (D, C)] + + [(C, C)] * 4 + + [(D, C)] ) self.versus_test(axl.Cooperator(), expected_actions=actions, seed=1) def test_strategy2(self): # Check for f < -0.95, cooperate - actions = [(D, C), (C, C), (D, C), (D, C), (C, C), (D, C), (C, C), (C, C)] + actions = [ + (D, C), + (C, C), + (D, C), + (D, C), + (C, C), + (D, C), + (C, C), + (C, C), + ] self.versus_test( opponent=axl.Cooperator(), expected_actions=actions, seed=10 ) def test_strategy3(self): actions = [(D, D)] + [(D, D)] * 5 + [(D, D), (C, D), (C, D)] - self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=10) + self.versus_test( + opponent=axl.Defector(), expected_actions=actions, seed=10 + ) def test_strategy4(self): # Check for -0.3 < f < 0.3, random @@ -57,7 +70,9 @@ def test_strategy5(self): + [(D, D)] * 8 + [(C, D), (C, D), (D, D), (D, D), (D, D)] ) - self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=32) + self.versus_test( + opponent=axl.Defector(), expected_actions=actions, seed=32 + ) def test_strategy6(self): # Check for 0.95 > abs(f) > 0.3, follows TitForTat @@ -66,7 +81,9 @@ def test_strategy6(self): + [(C, D), (D, D), (C, D), (C, D), (D, D), (C, D)] + [(D, D)] * 5 ) - self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=17) + self.versus_test( + opponent=axl.Defector(), expected_actions=actions, seed=17 + ) def test_strategy7(self): actions = [ @@ -81,4 +98,6 @@ def test_strategy7(self): (C, C), (C, C), ] - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions, seed=10) + self.versus_test( + opponent=axl.Cooperator(), expected_actions=actions, seed=10 + ) diff --git a/axelrod/tests/strategies/test_shortmem.py b/axelrod/tests/strategies/test_shortmem.py index 48dcd0138..525aea843 100644 --- a/axelrod/tests/strategies/test_shortmem.py +++ b/axelrod/tests/strategies/test_shortmem.py @@ -12,7 +12,7 @@ class TestShortMem(TestPlayer): name = "ShortMem" player = axl.ShortMem expected_classifier = { - "memory_depth": float('inf'), + "memory_depth": float("inf"), "stochastic": False, "makes_use_of": set(), 
"inspects_source": False, @@ -44,14 +44,26 @@ def test_strategy(self): ) # If neither of the above conditions are met, apply TitForTat - actions = [(C, D)] * 5 + [(C, C)] * 6 + [(C, D), (D, D), (D, D), (D, C), (C, C)] + actions = ( + [(C, D)] * 5 + + [(C, C)] * 6 + + [(C, D), (D, D), (D, D), (D, C), (C, C)] + ) self.versus_test( - opponent=axl.MockPlayer(actions=[D] * 5 + [C] * 6 + [D, D, D, C, C]), + opponent=axl.MockPlayer( + actions=[D] * 5 + [C] * 6 + [D, D, D, C, C] + ), expected_actions=actions, ) - actions = [(C, C)] * 5 + [(C, D)] * 6 + [(D, C), (C, C), (C, C), (C, D), (D, D)] + actions = ( + [(C, C)] * 5 + + [(C, D)] * 6 + + [(D, C), (C, C), (C, C), (C, D), (D, D)] + ) self.versus_test( - opponent=axl.MockPlayer(actions=[C] * 5 + [D] * 6 + [C, C, C, D, D]), + opponent=axl.MockPlayer( + actions=[C] * 5 + [D] * 6 + [C, C, C, D, D] + ), expected_actions=actions, ) diff --git a/axelrod/tests/strategies/test_stalker.py b/axelrod/tests/strategies/test_stalker.py index 59b6d1370..224d66ef3 100644 --- a/axelrod/tests/strategies/test_stalker.py +++ b/axelrod/tests/strategies/test_stalker.py @@ -14,7 +14,7 @@ class TestStalker(TestPlayer): expected_classifier = { "memory_depth": float("inf"), "stochastic": True, - "makes_use_of": set(["game", "length"]), + "makes_use_of": {"game", "length"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -28,12 +28,14 @@ def test_strategy(self): # wish_score < current_average_score < very_good_score actions = [(C, C)] * 7 + [(C, D), (C, D), (C, C), (C, C), (D, C)] self.versus_test( - opponent=axl.MockPlayer(actions=[C] * 7 + [D] * 2), expected_actions=actions + opponent=axl.MockPlayer(actions=[C] * 7 + [D] * 2), + expected_actions=actions, ) actions = [(C, C)] * 7 + [(C, D), (C, C), (D, C)] self.versus_test( - opponent=axl.MockPlayer(actions=[C] * 7 + [D]), expected_actions=actions + opponent=axl.MockPlayer(actions=[C] * 7 + [D]), + expected_actions=actions, ) # current_average_score > 2 @@ -43,7 +45,8 @@ def test_strategy(self): # 1 < current_average_score < 2 actions = [(C, C)] * 7 + [(C, D)] * 4 + [(D, D)] self.versus_test( - opponent=axl.MockPlayer(actions=[C] * 7 + [D] * 5), expected_actions=actions + opponent=axl.MockPlayer(actions=[C] * 7 + [D] * 5), + expected_actions=actions, ) def test_strategy2(self): @@ -75,7 +78,9 @@ def test_strategy4(self): # defect in last round actions = [(C, C)] * 199 + [(D, C)] self.versus_test( - axl.Cooperator(), expected_actions=actions, match_attributes={"length": 200} + axl.Cooperator(), + expected_actions=actions, + match_attributes={"length": 200}, ) # Force a defection before the end of the actual match which ensures diff --git a/axelrod/tests/strategies/test_titfortat.py b/axelrod/tests/strategies/test_titfortat.py index fb984ea4b..d262caddc 100644 --- a/axelrod/tests/strategies/test_titfortat.py +++ b/axelrod/tests/strategies/test_titfortat.py @@ -334,7 +334,10 @@ def test_strategy(self): self.versus_test( opponent, expected_actions=actions, - attrs={"calm_count": 0, "punish_count": 0,}, + attrs={ + "calm_count": 0, + "punish_count": 0, + }, ) opponent = axl.MockPlayer(actions=[D]) @@ -342,7 +345,10 @@ def test_strategy(self): self.versus_test( opponent, expected_actions=actions, - attrs={"calm_count": 0, "punish_count": 0,}, + attrs={ + "calm_count": 0, + "punish_count": 0, + }, ) opponent = axl.MockPlayer(actions=[D, C]) @@ -350,7 +356,10 @@ def test_strategy(self): self.versus_test( opponent, expected_actions=actions, - attrs={"calm_count": 2, "punish_count": 0,}, + attrs={ 
+ "calm_count": 2, + "punish_count": 0, + }, ) opponent = axl.MockPlayer(actions=[D, C, C]) @@ -358,7 +367,10 @@ def test_strategy(self): self.versus_test( opponent, expected_actions=actions, - attrs={"calm_count": 1, "punish_count": 0,}, + attrs={ + "calm_count": 1, + "punish_count": 0, + }, ) opponent = axl.MockPlayer(actions=[D, C, D, C]) @@ -366,7 +378,10 @@ def test_strategy(self): self.versus_test( opponent, expected_actions=actions, - attrs={"calm_count": 0, "punish_count": 0,}, + attrs={ + "calm_count": 0, + "punish_count": 0, + }, ) opponent = axl.MockPlayer(actions=[D, C, D, C, C]) @@ -374,7 +389,10 @@ def test_strategy(self): self.versus_test( opponent, expected_actions=actions, - attrs={"calm_count": 0, "punish_count": 0,}, + attrs={ + "calm_count": 0, + "punish_count": 0, + }, ) opponent = axl.MockPlayer(actions=[D, C, D, C, C, C]) @@ -382,23 +400,50 @@ def test_strategy(self): self.versus_test( opponent, expected_actions=actions, - attrs={"calm_count": 0, "punish_count": 0,}, + attrs={ + "calm_count": 0, + "punish_count": 0, + }, ) opponent = axl.MockPlayer(actions=[D, C, D, C, C, C, D, C]) - actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, C), (C, D), (D, C)] + actions = [ + (C, D), + (D, C), + (C, D), + (C, C), + (C, C), + (C, C), + (C, D), + (D, C), + ] self.versus_test( opponent, expected_actions=actions, - attrs={"calm_count": 2, "punish_count": 2,}, + attrs={ + "calm_count": 2, + "punish_count": 2, + }, ) opponent = axl.MockPlayer(actions=[D, C, D, C, C, D, D, D]) - actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, D), (D, D), (D, D)] + actions = [ + (C, D), + (D, C), + (C, D), + (C, C), + (C, C), + (C, D), + (D, D), + (D, D), + ] self.versus_test( opponent, expected_actions=actions, - attrs={"calm_count": 2, "punish_count": 1,}, + attrs={ + "calm_count": 2, + "punish_count": 1, + }, ) opponent = axl.Defector() @@ -459,7 +504,10 @@ def test_strategy(self): self.versus_test( opponent, expected_actions=actions, - attrs={"calm_count": 2, "punish_count": 42,}, + attrs={ + "calm_count": 2, + "punish_count": 42, + }, ) def test_specific_set_of_results(self): @@ -471,7 +519,10 @@ def test_specific_set_of_results(self): to a memory one player that start by defecting and only cooperates if both players cooperated in the previous round. 
""" - mistrust_with_bug = axl.MemoryOnePlayer(initial=D, four_vector=(1, 0, 0, 0),) + mistrust_with_bug = axl.MemoryOnePlayer( + initial=D, + four_vector=(1, 0, 0, 0), + ) players = [ self.player(), axl.TitForTat(), @@ -614,7 +665,16 @@ def test_strategy(self): ) opponent = axl.MockPlayer(actions=[D, C, D, C, C, C, D, C]) - actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, C), (C, D), (D, C)] + actions = [ + (C, D), + (D, C), + (C, D), + (C, C), + (C, C), + (C, C), + (C, D), + (D, C), + ] self.versus_test( opponent, expected_actions=actions, @@ -627,7 +687,16 @@ def test_strategy(self): ) opponent = axl.MockPlayer(actions=[D, C, D, C, C, D, D, D]) - actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, D), (D, D), (D, D)] + actions = [ + (C, D), + (D, C), + (C, D), + (C, C), + (C, C), + (C, D), + (D, D), + (D, D), + ] self.versus_test( opponent, expected_actions=actions, @@ -669,7 +738,9 @@ def test_output_from_literature(self): ] turns = 1000 - tournament = axl.Tournament(players, turns=turns, repetitions=1, seed=75) + tournament = axl.Tournament( + players, turns=turns, repetitions=1, seed=75 + ) results = tournament.play(progress_bar=False) scores = [ round(average_score_per_turn * 1000, 1) @@ -714,8 +785,10 @@ def test_init(self): self.assertEqual(player._recorded_history, []) @given( - strategies=strategy_lists(strategies=deterministic_strategies, max_size=1), - turns=integers(min_value=1, max_value=20) + strategies=strategy_lists( + strategies=deterministic_strategies, max_size=1 + ), + turns=integers(min_value=1, max_value=20), ) @settings(deadline=None) def test_is_tit_for_tat_with_no_noise(self, strategies, turns): @@ -727,33 +800,59 @@ def test_is_tit_for_tat_with_no_noise(self, strategies, turns): self.assertEqual(m1.play(), m2.play()) def test_strategy_with_noise1(self): - self.versus_test(axl.Defector(), [(C, D)], turns=1, seed=9, - attrs={"_recorded_history": [C]}) + self.versus_test( + axl.Defector(), + [(C, D)], + turns=1, + seed=9, + attrs={"_recorded_history": [C]}, + ) def test_strategy_with_noise2(self): - self.versus_test(axl.Defector(), [(D, C)], turns=1, noise=0.5, seed=11, - attrs={"_recorded_history": [C]}) + self.versus_test( + axl.Defector(), + [(D, C)], + turns=1, + noise=0.5, + seed=11, + attrs={"_recorded_history": [C]}, + ) def test_strategy_with_noise3(self): # After noise: is contrite actions = list(zip([D, C], [C, D])) - self.versus_test(axl.Defector(), actions, turns=2, noise=0.5, seed=49, - attrs={"_recorded_history": [C, C], - "contrite": True}) + self.versus_test( + axl.Defector(), + actions, + turns=2, + noise=0.5, + seed=49, + attrs={"_recorded_history": [C, C], "contrite": True}, + ) def test_strategy_with_noise4(self): # Cooperates and no longer contrite actions = list(zip([D, C, C], [C, D, D])) - self.versus_test(axl.Defector(), actions, turns=3, noise=0.5, seed=49, - attrs={"_recorded_history": [C, C, C], - "contrite": False}) + self.versus_test( + axl.Defector(), + actions, + turns=3, + noise=0.5, + seed=49, + attrs={"_recorded_history": [C, C, C], "contrite": False}, + ) def test_strategy_with_noise5(self): # Defects and no longer contrite actions = list(zip([D, C, C, D], [C, D, D, D])) - self.versus_test(axl.Defector(), actions, turns=4, noise=0.5, seed=158, - attrs={"_recorded_history": [C, C, C, D], - "contrite": False}) + self.versus_test( + axl.Defector(), + actions, + turns=4, + noise=0.5, + seed=158, + attrs={"_recorded_history": [C, C, C, D], "contrite": False}, + ) class TestAdaptiveTitForTat(TestPlayer): @@ -900,7 +999,9 @@ 
class TestEugineNier(TestPlayer): def test_strategy(self): actions = [(C, C), (C, C), (C, C), (D, C)] self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs={"is_defector": False} + axl.Cooperator(), + expected_actions=actions, + attrs={"is_defector": False}, ) actions = [(C, C), (C, C), (C, C), (C, C)] @@ -914,7 +1015,9 @@ def test_strategy(self): # Plays TfT and defects in last round actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (D, D)] self.versus_test( - axl.Alternator(), expected_actions=actions, attrs={"is_defector": False} + axl.Alternator(), + expected_actions=actions, + attrs={"is_defector": False}, ) actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)] @@ -927,7 +1030,16 @@ def test_strategy(self): # Becomes defector after 5 defections opponent = axl.MockPlayer(actions=[D, C, D, D, D, D, C, C]) - actions = [(C, D), (D, C), (C, D), (D, D), (D, D), (D, D), (D, C), (D, C)] + actions = [ + (C, D), + (D, C), + (C, D), + (D, D), + (D, D), + (D, D), + (D, C), + (D, C), + ] self.versus_test(opponent, expected_actions=actions) @@ -955,13 +1067,17 @@ def test_strategy(self): init_kwargs = {"N": 1, "M": 2} opponent = axl.MockPlayer(actions=[D, D, D, C, C]) actions = [(C, D), (C, D), (D, D), (D, C), (C, C), (C, D)] - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + self.versus_test( + opponent, expected_actions=actions, init_kwargs=init_kwargs + ) # TwoTitsForTat test_strategy init_kwargs = {"N": 2, "M": 1} opponent = axl.MockPlayer(actions=[D, C, C, D, C]) actions = [(C, D), (D, C), (D, C), (C, D), (D, C)] - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + self.versus_test( + opponent, expected_actions=actions, init_kwargs=init_kwargs + ) actions = [(C, C), (C, C)] self.versus_test( opponent=axl.Cooperator(), @@ -970,7 +1086,9 @@ def test_strategy(self): ) actions = [(C, D), (D, D), (D, D)] self.versus_test( - opponent=axl.Defector(), expected_actions=actions, init_kwargs=init_kwargs, + opponent=axl.Defector(), + expected_actions=actions, + init_kwargs=init_kwargs, ) # Cooperator test_strategy @@ -1017,6 +1135,7 @@ def test_varying_memory_depth(self): class Test1TitsFor1TatsIsTFT(TestTitForTat): """Tests that for N = 1 = M, all the TFT tests are passed.""" + name = "N Tit(s) For M Tat(s): 1, 1" player = lambda x: axl.NTitsForMTats(1, 1) expected_classifier = { @@ -1032,6 +1151,7 @@ class Test1TitsFor1TatsIsTFT(TestTitForTat): class Test1TitsFor2TatsIsTF2T(TestTitFor2Tats): """Tests that for N = 1, M = 2, all the TF2T tests are passed.""" + name = "N Tit(s) For M Tat(s): 1, 2" player = lambda x: axl.NTitsForMTats(1, 2) expected_classifier = { @@ -1047,6 +1167,7 @@ class Test1TitsFor2TatsIsTF2T(TestTitFor2Tats): class Test2TitsFor1TatsIs2TFT(TestTwoTitsForTat): """Tests that for N = 2, M = 1, all the 2TFT tests are passed.""" + name = "N Tit(s) For M Tat(s): 2, 1" player = lambda x: axl.NTitsForMTats(2, 1) expected_classifier = { @@ -1165,7 +1286,9 @@ def test_strategy(self): ) actions = [(C, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions, init_kwargs={"p": 0}) + self.versus_test( + axl.Defector(), expected_actions=actions, init_kwargs={"p": 0} + ) actions = [(C, C), (C, C), (D, C), (C, C)] self.versus_test( @@ -1173,7 +1296,9 @@ def test_strategy(self): ) actions = [(C, D), (D, D), (C, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions, init_kwargs={"p": 1}) + self.versus_test( + axl.Defector(), expected_actions=actions, init_kwargs={"p": 1} + ) 
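The three NTitsForMTats classes above assert these equivalences by inheriting the full TitForTat, TitFor2Tats and TwoTitsForTat test suites. The same claim can also be spot-checked directly; a minimal sketch of the N = 2, M = 1 case (the Alternator opponent is an arbitrary choice, and the axl.Match API is the one used throughout these tests):

import axelrod as axl

# With N = 2 tits for M = 1 tat, NTitsForMTats should reproduce
# TwoTitsForTat's play move for move against any fixed opponent.
generalised = axl.Match((axl.NTitsForMTats(2, 1), axl.Alternator()), turns=10)
reference = axl.Match((axl.TwoTitsForTat(), axl.Alternator()), turns=10)
assert generalised.play() == reference.play()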
actions = [(C, C), (C, C), (D, C), (C, C), (C, C), (C, C)] self.versus_test(axl.Cooperator(), expected_actions=actions, seed=2) diff --git a/axelrod/tests/strategies/test_worse_and_worse.py b/axelrod/tests/strategies/test_worse_and_worse.py index 1fd2d5ef8..4403eb642 100644 --- a/axelrod/tests/strategies/test_worse_and_worse.py +++ b/axelrod/tests/strategies/test_worse_and_worse.py @@ -39,7 +39,7 @@ class TestKnowledgeableWorseAndWorse(TestPlayer): expected_classifier = { "memory_depth": float("inf"), "stochastic": True, - "makes_use_of": set(["length"]), + "makes_use_of": {"length"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -101,7 +101,8 @@ def test_strategy(self): def test_strategy2(self): actions = [(C, C), (C, C), (C, D), (D, C)] self.versus_test( - opponent=axl.MockPlayer(actions=[C, C, D, C]), expected_actions=actions + opponent=axl.MockPlayer(actions=[C, C, D, C]), + expected_actions=actions, ) def test_strategy3(self): diff --git a/axelrod/tests/strategies/test_zero_determinant.py b/axelrod/tests/strategies/test_zero_determinant.py index 75b98fb5f..e1e8c37b5 100644 --- a/axelrod/tests/strategies/test_zero_determinant.py +++ b/axelrod/tests/strategies/test_zero_determinant.py @@ -24,7 +24,7 @@ class TestZDExtortion(TestPlayer): expected_classifier = { "memory_depth": 1, "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -32,7 +32,12 @@ class TestZDExtortion(TestPlayer): } def test_four_vector(self): - expected_dictionary = {(C, C): 0.64, (C, D): 0.18, (D, C): 0.28, (D, D): 0} + expected_dictionary = { + (C, C): 0.64, + (C, D): 0.18, + (D, C): 0.28, + (D, D): 0, + } test_four_vector(self, expected_dictionary) def test_strategy(self): @@ -43,7 +48,9 @@ def test_strategy(self): def test_strategy2(self): actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=1) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=1 + ) class TestZDExtort2(TestPlayer): @@ -53,7 +60,7 @@ class TestZDExtort2(TestPlayer): expected_classifier = { "memory_depth": 1, "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -61,7 +68,12 @@ class TestZDExtort2(TestPlayer): } def test_four_vector(self): - expected_dictionary = {(C, C): 8 / 9, (C, D): 0.5, (D, C): 1 / 3, (D, D): 0.0} + expected_dictionary = { + (C, C): 8 / 9, + (C, D): 0.5, + (D, C): 1 / 3, + (D, D): 0.0, + } test_four_vector(self, expected_dictionary) def test_receive_match_attributes(self): @@ -83,11 +95,15 @@ def test_strategy2(self): def test_strategy3(self): actions = [(C, D), (D, C), (D, D), (D, C), (C, D), (C, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=10) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=10 + ) def test_strategy4(self): actions = [(C, D), (C, C), (C, D), (C, C), (C, D), (C, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=7) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=7 + ) class TestZDExtort2v2(TestPlayer): @@ -97,7 +113,7 @@ class TestZDExtort2v2(TestPlayer): expected_classifier = { "memory_depth": 1, "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, 
"manipulates_source": False, @@ -121,7 +137,9 @@ def test_strategy(self): def test_strategy2(self): actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=2) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=2 + ) class TestZDExtort3(TestPlayer): @@ -130,7 +148,7 @@ class TestZDExtort3(TestPlayer): expected_classifier = { "memory_depth": 1, "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -154,7 +172,9 @@ def test_strategy(self): def test_strategy2(self): actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=2) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=2 + ) class TestZDExtort4(TestPlayer): @@ -164,7 +184,7 @@ class TestZDExtort4(TestPlayer): expected_classifier = { "memory_depth": 1, "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -172,7 +192,12 @@ class TestZDExtort4(TestPlayer): } def test_four_vector(self): - expected_dictionary = {(C, C): 11 / 17, (C, D): 0, (D, C): 8 / 17, (D, D): 0.0} + expected_dictionary = { + (C, C): 11 / 17, + (C, D): 0, + (D, C): 8 / 17, + (D, D): 0.0, + } test_four_vector(self, expected_dictionary) def test_strategy(self): @@ -183,7 +208,9 @@ def test_strategy(self): def test_strategy2(self): actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=10) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=10 + ) class TestZDGen2(TestPlayer): @@ -193,7 +220,7 @@ class TestZDGen2(TestPlayer): expected_classifier = { "memory_depth": 1, "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -201,7 +228,12 @@ class TestZDGen2(TestPlayer): } def test_four_vector(self): - expected_dictionary = {(C, C): 1, (C, D): 9 / 16, (D, C): 1 / 2, (D, D): 1 / 8} + expected_dictionary = { + (C, C): 1, + (C, D): 9 / 16, + (D, C): 1 / 2, + (D, D): 1 / 8, + } test_four_vector(self, expected_dictionary) def test_strategy(self): @@ -218,11 +250,15 @@ def test_strategy2(self): def test_strategy3(self): actions = [(C, D), (D, C), (D, D), (C, C), (C, D), (C, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=10) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=10 + ) def test_strategy4(self): actions = [(C, D), (C, C), (C, D), (C, C), (C, D), (C, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=3) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=3 + ) class TestZDGTFT2(TestPlayer): @@ -232,14 +268,19 @@ class TestZDGTFT2(TestPlayer): expected_classifier = { "memory_depth": 1, "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "inspects_source": False, "manipulates_source": False, "manipulates_state": False, } def test_four_vector(self): - expected_dictionary = {(C, C): 1.0, (C, D): 1 / 8, (D, C): 1.0, (D, D): 0.25} + expected_dictionary = { + (C, C): 1.0, + (C, D): 1 / 8, + (D, C): 1.0, + (D, D): 0.25, + } test_four_vector(self, expected_dictionary) def test_receive_match_attributes(self): @@ 
-261,11 +302,15 @@ def test_strategy2(self): def test_strategy3(self): actions = [(C, D), (D, C), (C, D), (D, C), (C, D), (C, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=4) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=4 + ) def test_strategy4(self): actions = [(C, D), (C, C), (C, D), (C, C), (C, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=23) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=23 + ) class TestZDMischief(TestPlayer): @@ -275,7 +320,7 @@ class TestZDMischief(TestPlayer): expected_classifier = { "memory_depth": 1, "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -294,7 +339,9 @@ def test_strategy(self): def test_strategy2(self): actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=4) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=4 + ) class TestZDSet2(TestPlayer): @@ -304,7 +351,7 @@ class TestZDSet2(TestPlayer): expected_classifier = { "memory_depth": 1, "stochastic": True, - "makes_use_of": set(["game"]), + "makes_use_of": {"game"}, "long_run_time": False, "inspects_source": False, "manipulates_source": False, @@ -328,4 +375,6 @@ def test_strategy(self): def test_strategy2(self): actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=12) + self.versus_test( + opponent=axl.CyclerDC(), expected_actions=actions, seed=12 + ) diff --git a/axelrod/tests/unit/test_classification.py b/axelrod/tests/unit/test_classification.py index c5d90df5f..c44d23516 100644 --- a/axelrod/tests/unit/test_classification.py +++ b/axelrod/tests/unit/test_classification.py @@ -50,7 +50,9 @@ def tearDown(self) -> None: def test_classifier_build(self): dirname = os.path.dirname(__file__) - test_path = os.path.join(dirname, "../../../test_outputs/classifier_test.yaml") + test_path = os.path.join( + dirname, "../../../test_outputs/classifier_test.yaml" + ) # Just returns the name of the player. For testing. 
name_classifier = Classifier[Text]("name", lambda player: player.name) @@ -66,7 +68,10 @@ def test_classifier_build(self): self.assertDictEqual( all_player_dicts, - {"Cooperator": {"name": "Cooperator"}, "Defector": {"name": "Defector"}}, + { + "Cooperator": {"name": "Cooperator"}, + "Defector": {"name": "Defector"}, + }, ) def test_singletonity_of_classifiers_class(self): @@ -91,7 +96,9 @@ def test_key_error_on_uknown_classifier(self): Classifiers["invalid_key"](axl.TitForTat) def test_will_lookup_key_in_dict(self): - self.assertEqual(Classifiers["memory_depth"](TitForTatWithEmptyClassifier), 1) + self.assertEqual( + Classifiers["memory_depth"](TitForTatWithEmptyClassifier), 1 + ) def test_will_lookup_key_for_classes_that_cant_init(self): with self.assertRaises(Exception) as exptn: @@ -114,7 +121,9 @@ def test_known_classifiers(self): for s in axl.all_strategies: s = s() - self.assertTrue(None not in [Classifiers[key](s) for key in known_keys]) + self.assertTrue( + None not in [Classifiers[key](s) for key in known_keys] + ) def test_multiple_instances(self): """Certain instances of classes of strategies will have different @@ -185,13 +194,19 @@ def test_obey_axelrod(self): ] for strategy in known_cheaters: - self.assertFalse(axl.Classifiers.obey_axelrod(strategy()), msg=strategy) + self.assertFalse( + axl.Classifiers.obey_axelrod(strategy()), msg=strategy + ) for strategy in known_basic: - self.assertTrue(axl.Classifiers.obey_axelrod(strategy()), msg=strategy) + self.assertTrue( + axl.Classifiers.obey_axelrod(strategy()), msg=strategy + ) for strategy in known_ordinary: - self.assertTrue(axl.Classifiers.obey_axelrod(strategy()), msg=strategy) + self.assertTrue( + axl.Classifiers.obey_axelrod(strategy()), msg=strategy + ) def test_is_basic(self): """A test that verifies if the is_basic function works correctly""" @@ -294,7 +309,9 @@ def test_inclusion_of_strategy_lists(self): axl.basic_strategies, axl.long_run_time_strategies, ]: - self.assertTrue(str_reps(strategy_list).issubset(str_reps(strategies_set))) + self.assertTrue( + str_reps(strategy_list).issubset(str_reps(strategies_set)) + ) def test_long_run_strategies(self): long_run_time_strategies = [ @@ -319,7 +336,8 @@ def test_long_run_strategies(self): ] self.assertEqual( - str_reps(long_run_time_strategies), str_reps(axl.long_run_time_strategies) + str_reps(long_run_time_strategies), + str_reps(axl.long_run_time_strategies), ) self.assertTrue( all(map(Classifiers["long_run_time"], axl.long_run_time_strategies)) @@ -331,10 +349,13 @@ def test_short_run_strategies(self): ] self.assertEqual( - str_reps(short_run_time_strategies), str_reps(axl.short_run_time_strategies) + str_reps(short_run_time_strategies), + str_reps(axl.short_run_time_strategies), ) self.assertFalse( - any(map(Classifiers["long_run_time"], axl.short_run_time_strategies)) + any( + map(Classifiers["long_run_time"], axl.short_run_time_strategies) + ) ) def test_meta_inclusion(self): @@ -353,4 +374,6 @@ def test_demo_strategies(self): axl.Grudger, axl.Random, ] - self.assertTrue(str_reps(demo_strategies), str_reps(axl.demo_strategies)) + self.assertTrue( + str_reps(demo_strategies), str_reps(axl.demo_strategies) + ) diff --git a/axelrod/tests/unit/test_compute_finite_state_machine_memory.py b/axelrod/tests/unit/test_compute_finite_state_machine_memory.py index 2b8ac5408..fd697b26f 100644 --- a/axelrod/tests/unit/test_compute_finite_state_machine_memory.py +++ b/axelrod/tests/unit/test_compute_finite_state_machine_memory.py @@ -98,7 +98,7 @@ def test_three_state_tft(self): 
(1, C, 2, C), (1, D, 0, D), (2, C, 0, C), - (2, D, 2, D) + (2, D, 2, D), ) trans_dict = self.transitions_to_dict(transitions) @@ -159,8 +159,7 @@ def test_tit_for_two_tat(self): self.assertEqual(get_memory_from_transitions(trans_dict), 2) def test_tit_for_five_tat(self): - """Analogous to tit for two tat above. - """ + """Analogous to tit for two tat above.""" transitions = ( (1, C, 1, C), (1, D, 2, C), @@ -227,8 +226,7 @@ def test_fortress_4(self): self.assertEqual(get_memory_from_transitions(trans_dict), 3) def test_complex_cooperator(self): - """Tests a cooperator with lots of states and transitions. - """ + """Tests a cooperator with lots of states and transitions.""" transitions = ( (0, C, 0, C), (0, D, 1, C), diff --git a/axelrod/tests/unit/test_deterministic_cache.py b/axelrod/tests/unit/test_deterministic_cache.py index e2c1d1ae3..7d3929a2d 100644 --- a/axelrod/tests/unit/test_deterministic_cache.py +++ b/axelrod/tests/unit/test_deterministic_cache.py @@ -18,7 +18,9 @@ def setUpClass(cls): cls.test_save_file = axl_filename(save_path) load_path = pathlib.Path("test_outputs/test_cache_load.txt") cls.test_load_file = axl_filename(load_path) - test_data_to_pickle = {("Tit For Tat", "Defector"): [(C, D), (D, D), (D, D)]} + test_data_to_pickle = { + ("Tit For Tat", "Defector"): [(C, D), (D, D), (D, D)] + } cls.test_pickle = pickle.dumps(test_data_to_pickle) with open(cls.test_load_file, "wb") as f: diff --git a/axelrod/tests/unit/test_filters.py b/axelrod/tests/unit/test_filters.py index 88133a6b0..70e11ed6e 100644 --- a/axelrod/tests/unit/test_filters.py +++ b/axelrod/tests/unit/test_filters.py @@ -18,10 +18,14 @@ class TestStrategy(Player): def test_equality_filter(self): self.assertTrue( - passes_operator_filter(self.TestStrategy, "stochastic", True, operator.eq) + passes_operator_filter( + self.TestStrategy, "stochastic", True, operator.eq + ) ) self.assertFalse( - passes_operator_filter(self.TestStrategy, "stochastic", False, operator.eq) + passes_operator_filter( + self.TestStrategy, "stochastic", False, operator.eq + ) ) self.assertTrue( passes_operator_filter( @@ -70,7 +74,9 @@ def test_list_filter(self): passes_in_list_filter(self.TestStrategy, "makes_use_of", ["length"]) ) self.assertTrue( - passes_in_list_filter(self.TestStrategy, "makes_use_of", ["game", "length"]) + passes_in_list_filter( + self.TestStrategy, "makes_use_of", ["game", "length"] + ) ) self.assertFalse( passes_in_list_filter(self.TestStrategy, "makes_use_of", "test") @@ -119,11 +125,21 @@ def test_passes_filterset(self, smaller, larger): "min_memory_depth": smaller, } - self.assertTrue(passes_filterset(self.TestStrategy, full_passing_filterset_1)) - self.assertTrue(passes_filterset(self.TestStrategy, full_passing_filterset_2)) - self.assertTrue(passes_filterset(self.TestStrategy, sparse_passing_filterset)) - self.assertFalse(passes_filterset(self.TestStrategy, full_failing_filterset)) - self.assertFalse(passes_filterset(self.TestStrategy, sparse_failing_filterset)) + self.assertTrue( + passes_filterset(self.TestStrategy, full_passing_filterset_1) + ) + self.assertTrue( + passes_filterset(self.TestStrategy, full_passing_filterset_2) + ) + self.assertTrue( + passes_filterset(self.TestStrategy, sparse_passing_filterset) + ) + self.assertFalse( + passes_filterset(self.TestStrategy, full_failing_filterset) + ) + self.assertFalse( + passes_filterset(self.TestStrategy, sparse_failing_filterset) + ) def test_filtered_strategies(self): class StochasticTestStrategy(Player): @@ -134,7 +150,11 @@ class 
StochasticTestStrategy(Player): } class MemoryDepth2TestStrategy(Player): - classifier = {"stochastic": False, "memory_depth": 2, "makes_use_of": []} + classifier = { + "stochastic": False, + "memory_depth": 2, + "makes_use_of": [], + } class UsesLengthTestStrategy(Player): classifier = { diff --git a/axelrod/tests/unit/test_fingerprint.py b/axelrod/tests/unit/test_fingerprint.py index 9c727f91e..7660550cf 100644 --- a/axelrod/tests/unit/test_fingerprint.py +++ b/axelrod/tests/unit/test_fingerprint.py @@ -90,7 +90,9 @@ def test_fingerprint_player(self): self.assertEqual(af.points, self.points_when_using_half_step) self.assertEqual(af.spatial_tournament.turns, 5) self.assertEqual(af.spatial_tournament.repetitions, 3) - self.assertEqual(af.spatial_tournament.edges, self.edges_when_using_half_step) + self.assertEqual( + af.spatial_tournament.edges, self.edges_when_using_half_step + ) # The first player is the fingerprinted one, the rest are probes. self.assertIsInstance(af.spatial_tournament.players[0], axl.Cooperator) @@ -129,7 +131,17 @@ def test_fingerprint_interactions_cooperator(self): # The keys are edges between players, values are repetitions. self.assertCountEqual( af.interactions.keys(), - [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9)], + [ + (0, 1), + (0, 2), + (0, 3), + (0, 4), + (0, 5), + (0, 6), + (0, 7), + (0, 8), + (0, 9), + ], ) self.assertEqual(len(af.interactions.values()), 9) @@ -187,7 +199,9 @@ def test_fingerprint_interactions_titfortat(self): def test_progress_bar_fingerprint(self): af = AshlockFingerprint(axl.TitForTat) - data = af.fingerprint(turns=10, repetitions=2, step=0.5, progress_bar=True) + data = af.fingerprint( + turns=10, repetitions=2, step=0.5, progress_bar=True + ) self.assertEqual(sorted(data.keys()), self.points_when_using_half_step) @patch("axelrod.fingerprint.mkstemp", RecordedMksTemp.mkstemp) @@ -216,7 +230,11 @@ def test_fingerprint_with_filename(self): filename = axl_filename(path) af = AshlockFingerprint(axl.TitForTat) af.fingerprint( - turns=1, repetitions=1, step=0.5, progress_bar=False, filename=filename + turns=1, + repetitions=1, + step=0.5, + progress_bar=False, + filename=filename, ) with open(filename, "r") as out: data = out.read() @@ -224,7 +242,9 @@ def test_fingerprint_with_filename(self): def test_serial_fingerprint(self): af = AshlockFingerprint(axl.TitForTat) - data = af.fingerprint(turns=10, repetitions=2, step=0.5, progress_bar=False) + data = af.fingerprint( + turns=10, repetitions=2, step=0.5, progress_bar=False + ) edge_keys = sorted(list(af.interactions.keys())) coord_keys = sorted(list(data.keys())) self.assertEqual(af.step, 0.5) @@ -244,9 +264,13 @@ def test_parallel_fingerprint(self): def test_plot_data(self): af = AshlockFingerprint(axl.Cooperator()) - af.fingerprint(turns=5, repetitions=3, step=0.5, progress_bar=False, seed=0) + af.fingerprint( + turns=5, repetitions=3, step=0.5, progress_bar=False, seed=0 + ) - reshaped_data = np.array([[0.0, 0.0, 0.0], [1., 2., 1.6], [3.0, 3.0, 3.0]]) + reshaped_data = np.array( + [[0.0, 0.0, 0.0], [1.0, 2.0, 1.6], [3.0, 3.0, 3.0]] + ) plotted_data = af.plot().gca().images[0].get_array() np.testing.assert_allclose(plotted_data, reshaped_data) @@ -292,12 +316,13 @@ def test_wsls_fingerprint(self): Point(x=1.0, y=0.25): 4.86, Point(x=1.0, y=0.5): 4.36, Point(x=1.0, y=0.75): 4.05, - Point(x=1.0, y=1.0): 1.3 + Point(x=1.0, y=1.0): 1.3, } af = axl.AshlockFingerprint(axl.WinStayLoseShift(), axl.TitForTat) - data = af.fingerprint(turns=50, repetitions=2, step=0.25, 
progress_bar=False, - seed=0) + data = af.fingerprint( + turns=50, repetitions=2, step=0.25, progress_bar=False, seed=0 + ) for key, value in data.items(): self.assertAlmostEqual(value, test_data[key], places=2) @@ -328,12 +353,13 @@ def test_tft_fingerprint(self): Point(x=1.0, y=0.25): 2.68, Point(x=1.0, y=0.5): 2.51, Point(x=1.0, y=0.75): 2.41, - Point(x=1.0, y=1.0): 2.18 + Point(x=1.0, y=1.0): 2.18, } af = axl.AshlockFingerprint(axl.TitForTat(), axl.TitForTat) - data = af.fingerprint(turns=50, repetitions=2, step=0.25, progress_bar=False, - seed=0) + data = af.fingerprint( + turns=50, repetitions=2, step=0.25, progress_bar=False, seed=0 + ) for key, value in data.items(): self.assertAlmostEqual(value, test_data[key], places=2) @@ -364,12 +390,13 @@ def test_majority_fingerprint(self): Point(x=1.0, y=0.25): 2.12, Point(x=1.0, y=0.5): 1.8599999999999999, Point(x=1.0, y=0.75): 2.0300000000000002, - Point(x=1.0, y=1.0): 2.26 + Point(x=1.0, y=1.0): 2.26, } af = axl.AshlockFingerprint(axl.GoByMajority, axl.TitForTat) - data = af.fingerprint(turns=50, repetitions=2, step=0.25, progress_bar=False, - seed=0) + data = af.fingerprint( + turns=50, repetitions=2, step=0.25, progress_bar=False, seed=0 + ) for key, value in data.items(): self.assertAlmostEqual(value, test_data[key], places=2) @@ -383,22 +410,30 @@ def test_pair_fingerprints(self, strategy_pair): """ strategy, probe = strategy_pair af = AshlockFingerprint(strategy, probe) - data = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False, seed=1) + data = af.fingerprint( + turns=2, repetitions=2, step=0.5, progress_bar=False, seed=1 + ) self.assertIsInstance(data, dict) strategy, probe = strategy_pair af = AshlockFingerprint(strategy(), probe) - data = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False, seed=2) + data = af.fingerprint( + turns=2, repetitions=2, step=0.5, progress_bar=False, seed=2 + ) self.assertIsInstance(data, dict) strategy, probe = strategy_pair af = AshlockFingerprint(strategy, probe()) - data = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False, seed=3) + data = af.fingerprint( + turns=2, repetitions=2, step=0.5, progress_bar=False, seed=3 + ) self.assertIsInstance(data, dict) strategy, probe = strategy_pair af = AshlockFingerprint(strategy(), probe()) - data = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False, seed=4) + data = af.fingerprint( + turns=2, repetitions=2, step=0.5, progress_bar=False, seed=4 + ) self.assertIsInstance(data, dict) @given(strategy_pair=strategy_lists(min_size=2, max_size=2)) @@ -410,11 +445,15 @@ def test_fingerprint_reproducibility(self, strategy_pair): """ strategy, probe = strategy_pair af = AshlockFingerprint(strategy(), probe()) - data = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False, seed=4) + data = af.fingerprint( + turns=2, repetitions=2, step=0.5, progress_bar=False, seed=4 + ) strategy, probe = strategy_pair af = AshlockFingerprint(strategy(), probe()) - data2 = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False, seed=4) + data2 = af.fingerprint( + turns=2, repetitions=2, step=0.5, progress_bar=False, seed=4 + ) self.assertEqual(data, data2) @@ -425,13 +464,16 @@ def test_init(self): fingerprint = axl.TransitiveFingerprint(strategy=player) self.assertEqual(fingerprint.strategy, player) self.assertEqual( - fingerprint.opponents, [axl.Random(p) for p in np.linspace(0, 1, 50)] + fingerprint.opponents, + [axl.Random(p) for p in np.linspace(0, 1, 50)], ) def 
test_init_with_opponents(self): player = axl.TitForTat() opponents = [s() for s in axl.demo_strategies] - fingerprint = axl.TransitiveFingerprint(strategy=player, opponents=opponents) + fingerprint = axl.TransitiveFingerprint( + strategy=player, opponents=opponents + ) self.assertEqual(fingerprint.strategy, player) self.assertEqual(fingerprint.opponents, opponents) @@ -443,7 +485,8 @@ def test_init_with_not_default_number(self): ) self.assertEqual(fingerprint.strategy, player) self.assertEqual( - fingerprint.opponents, [axl.Random(p) for p in np.linspace(0, 1, 10)] + fingerprint.opponents, + [axl.Random(p) for p in np.linspace(0, 1, 10)], ) def test_fingerprint_with_filename(self): @@ -451,7 +494,9 @@ def test_fingerprint_with_filename(self): filename = axl_filename(path) strategy = axl.TitForTat() tf = TransitiveFingerprint(strategy) - tf.fingerprint(turns=1, repetitions=1, progress_bar=False, filename=filename) + tf.fingerprint( + turns=1, repetitions=1, progress_bar=False, filename=filename + ) with open(filename, "r") as out: data = out.read() self.assertEqual(len(data.split("\n")), 102) diff --git a/axelrod/tests/unit/test_graph.py b/axelrod/tests/unit/test_graph.py index 41f7d6d62..2ca72af31 100644 --- a/axelrod/tests/unit/test_graph.py +++ b/axelrod/tests/unit/test_graph.py @@ -39,8 +39,12 @@ def test_undirected_graph_with_vertices_and_unweighted_edges(self): self.assertEqual(str(g), "") self.assertEqual(g._edges, [(1, 2), (2, 1), (2, 3), (3, 2)]) - self.assert_out_mapping(g, {1: {2: None}, 2: {1: None, 3: None}, 3: {2: None}}) - self.assert_in_mapping(g, {1: {2: None}, 2: {1: None, 3: None}, 3: {2: None}}) + self.assert_out_mapping( + g, {1: {2: None}, 2: {1: None, 3: None}, 3: {2: None}} + ) + self.assert_in_mapping( + g, {1: {2: None}, 2: {1: None, 3: None}, 3: {2: None}} + ) def test_undirected_graph_with_vertices_and_weighted_edges(self): g = axl.graph.Graph(edges=[[1, 2, 10], [2, 3, 5]]) @@ -91,7 +95,11 @@ def test_add_loops_with_existing_loop_and_using_strings(self): g.add_loops() self.assertEqual( list(sorted(g._edges)), - list(sorted([("a", "b"), ("b", "a"), ("c", "c"), ("a", "a"), ("b", "b")])), + list( + sorted( + [("a", "b"), ("b", "a"), ("c", "c"), ("a", "a"), ("b", "b")] + ) + ), ) @@ -147,9 +155,19 @@ def test_length_4_undirected(self): edges = [(0, 1), (1, 0), (1, 2), (2, 1), (2, 3), (3, 2), (3, 0), (0, 3)] self.assertEqual(g.vertices, [0, 1, 2, 3]) self.assertEqual(g.edges, edges) - for vertex, neighbors in [(0, (1, 3)), (1, (0, 2)), (2, (1, 3)), (3, (0, 2))]: + for vertex, neighbors in [ + (0, (1, 3)), + (1, (0, 2)), + (2, (1, 3)), + (3, (0, 2)), + ]: self.assertEqual(set(g.out_vertices(vertex)), set(neighbors)) - for vertex, neighbors in [(0, (1, 3)), (1, (0, 2)), (2, (1, 3)), (3, (0, 2))]: + for vertex, neighbors in [ + (0, (1, 3)), + (1, (0, 2)), + (2, (1, 3)), + (3, (0, 2)), + ]: self.assertEqual(set(g.in_vertices(vertex)), set(neighbors)) @@ -210,7 +228,17 @@ def test_size_2_with_loops(self): def test_size_3_with_loops(self): g = axl.graph.complete_graph(3, loops=True) self.assertEqual(g.vertices, [0, 1, 2]) - edges = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2), (2, 1), (0, 0), (1, 1), (2, 2)] + edges = [ + (0, 1), + (1, 0), + (0, 2), + (2, 0), + (1, 2), + (2, 1), + (0, 0), + (1, 1), + (2, 2), + ] self.assertEqual(g.edges, edges) self.assertEqual(g.directed, False) @@ -246,59 +274,70 @@ def test_size_4_with_loops(self): class TestAttachedComplete(unittest.TestCase): def test_size_2(self): g = axl.graph.attached_complete_graphs(2, loops=False) - 
self.assertEqual(g.vertices, ['0:0', '0:1', '1:0', '1:1']) + self.assertEqual(g.vertices, ["0:0", "0:1", "1:0", "1:1"]) self.assertEqual( g.edges, - [('0:0', '0:1'), ('0:1', '0:0'), ('1:0', '1:1'), ('1:1', '1:0'), ('0:0', '1:0'), ('1:0', '0:0')] + [ + ("0:0", "0:1"), + ("0:1", "0:0"), + ("1:0", "1:1"), + ("1:1", "1:0"), + ("0:0", "1:0"), + ("1:0", "0:0"), + ], ) self.assertEqual(g.directed, False) def test_size_3(self): g = axl.graph.attached_complete_graphs(3, loops=False) - self.assertEqual(g.vertices, ['0:0', '0:1', '0:2', '1:0', '1:1', '1:2']) + self.assertEqual(g.vertices, ["0:0", "0:1", "0:2", "1:0", "1:1", "1:2"]) self.assertEqual( g.edges, - [('0:0', '0:1'), - ('0:1', '0:0'), - ('0:0', '0:2'), - ('0:2', '0:0'), - ('0:1', '0:2'), - ('0:2', '0:1'), - ('1:0', '1:1'), - ('1:1', '1:0'), - ('1:0', '1:2'), - ('1:2', '1:0'), - ('1:1', '1:2'), - ('1:2', '1:1'), - ('0:0', '1:0'), - ('1:0', '0:0')] + [ + ("0:0", "0:1"), + ("0:1", "0:0"), + ("0:0", "0:2"), + ("0:2", "0:0"), + ("0:1", "0:2"), + ("0:2", "0:1"), + ("1:0", "1:1"), + ("1:1", "1:0"), + ("1:0", "1:2"), + ("1:2", "1:0"), + ("1:1", "1:2"), + ("1:2", "1:1"), + ("0:0", "1:0"), + ("1:0", "0:0"), + ], ) self.assertEqual(g.directed, False) def test_size_3_with_loops(self): g = axl.graph.attached_complete_graphs(3, loops=True) - self.assertEqual(g.vertices, ['0:0', '0:1', '0:2', '1:0', '1:1', '1:2']) + self.assertEqual(g.vertices, ["0:0", "0:1", "0:2", "1:0", "1:1", "1:2"]) self.assertEqual( g.edges, - [('0:0', '0:1'), - ('0:1', '0:0'), - ('0:0', '0:2'), - ('0:2', '0:0'), - ('0:1', '0:2'), - ('0:2', '0:1'), - ('1:0', '1:1'), - ('1:1', '1:0'), - ('1:0', '1:2'), - ('1:2', '1:0'), - ('1:1', '1:2'), - ('1:2', '1:1'), - ('0:0', '1:0'), - ('1:0', '0:0'), - ('0:0', '0:0'), - ('0:1', '0:1'), - ('0:2', '0:2'), - ('1:0', '1:0'), - ('1:1', '1:1'), - ('1:2', '1:2')] + [ + ("0:0", "0:1"), + ("0:1", "0:0"), + ("0:0", "0:2"), + ("0:2", "0:0"), + ("0:1", "0:2"), + ("0:2", "0:1"), + ("1:0", "1:1"), + ("1:1", "1:0"), + ("1:0", "1:2"), + ("1:2", "1:0"), + ("1:1", "1:2"), + ("1:2", "1:1"), + ("0:0", "1:0"), + ("1:0", "0:0"), + ("0:0", "0:0"), + ("0:1", "0:1"), + ("0:2", "0:2"), + ("1:0", "1:0"), + ("1:1", "1:1"), + ("1:2", "1:2"), + ], ) self.assertEqual(g.directed, False) diff --git a/axelrod/tests/unit/test_history.py b/axelrod/tests/unit/test_history.py index e7b105c31..11cd2445c 100644 --- a/axelrod/tests/unit/test_history.py +++ b/axelrod/tests/unit/test_history.py @@ -83,8 +83,7 @@ def test_flip_plays(self): self.assertEqual(flipped_history, [D, C, D, C, D]) self.assertEqual(flipped_history.cooperations, 2) self.assertEqual(flipped_history.defections, 3) - self.assertEqual(flipped_history.state_distribution, - new_distribution) + self.assertEqual(flipped_history.state_distribution, new_distribution) # Flip operation is idempotent flipped_flipped_history = flipped_history.flip_plays() @@ -94,7 +93,6 @@ def test_flip_plays(self): class TestLimitedHistory(unittest.TestCase): - def test_memory_depth(self): h = LimitedHistory(memory_depth=3) h.append(C, C) @@ -105,8 +103,9 @@ def test_memory_depth(self): self.assertEqual(len(h), 3) self.assertEqual(h.cooperations, 2) self.assertEqual(h.defections, 1) - self.assertEqual(h.state_distribution, - Counter({(C, C): 1, (D, D): 1, (C, D): 1})) + self.assertEqual( + h.state_distribution, Counter({(C, C): 1, (D, D): 1, (C, D): 1}) + ) h.append(D, C) self.assertEqual(len(h), 3) self.assertEqual(h._plays, [D, C, D]) @@ -115,4 +114,5 @@ def test_memory_depth(self): self.assertEqual(h.defections, 2) self.assertEqual( 
h.state_distribution, - Counter({(D, D): 1, (C, D): 1, (D, C): 1, (C, C): 0})) + Counter({(D, D): 1, (C, D): 1, (D, C): 1, (C, C): 0}), + ) diff --git a/axelrod/tests/unit/test_interaction_utils.py b/axelrod/tests/unit/test_interaction_utils.py index 49706fe01..d56e62cce 100644 --- a/axelrod/tests/unit/test_interaction_utils.py +++ b/axelrod/tests/unit/test_interaction_utils.py @@ -49,39 +49,65 @@ def test_compute_scores(self): def test_compute_final_score(self): for inter, final_score in zip(self.interactions, self.final_scores): - self.assertEqual(final_score, axl.interaction_utils.compute_final_score(inter)) + self.assertEqual( + final_score, axl.interaction_utils.compute_final_score(inter) + ) def test_compute_final_score_per_turn(self): for inter, final_score_per_round in zip( self.interactions, self.final_score_per_turn ): self.assertEqual( - final_score_per_round, axl.interaction_utils.compute_final_score_per_turn(inter) + final_score_per_round, + axl.interaction_utils.compute_final_score_per_turn(inter), ) def test_compute_winner_index(self): for inter, winner in zip(self.interactions, self.winners): - self.assertEqual(winner, axl.interaction_utils.compute_winner_index(inter)) + self.assertEqual( + winner, axl.interaction_utils.compute_winner_index(inter) + ) def test_compute_cooperations(self): for inter, coop in zip(self.interactions, self.cooperations): - self.assertEqual(coop, axl.interaction_utils.compute_cooperations(inter)) + self.assertEqual( + coop, axl.interaction_utils.compute_cooperations(inter) + ) def test_compute_normalised_cooperations(self): for inter, coop in zip(self.interactions, self.normalised_cooperations): - self.assertEqual(coop, axl.interaction_utils.compute_normalised_cooperation(inter)) + self.assertEqual( + coop, + axl.interaction_utils.compute_normalised_cooperation(inter), + ) def test_compute_state_distribution(self): for inter, dist in zip(self.interactions, self.state_distribution): - self.assertEqual(dist, axl.interaction_utils.compute_state_distribution(inter)) + self.assertEqual( + dist, axl.interaction_utils.compute_state_distribution(inter) + ) def test_compute_normalised_state_distribution(self): - for inter, dist in zip(self.interactions, self.normalised_state_distribution): - self.assertEqual(dist, axl.interaction_utils.compute_normalised_state_distribution(inter)) + for inter, dist in zip( + self.interactions, self.normalised_state_distribution + ): + self.assertEqual( + dist, + axl.interaction_utils.compute_normalised_state_distribution( + inter + ), + ) def test_compute_state_to_action_distribution(self): - for inter, dist in zip(self.interactions, self.state_to_action_distribution): - self.assertEqual(dist, axl.interaction_utils.compute_state_to_action_distribution(inter)) + for inter, dist in zip( + self.interactions, self.state_to_action_distribution + ): + self.assertEqual( + dist, + axl.interaction_utils.compute_state_to_action_distribution( + inter + ), + ) inter = [(C, D), (D, C), (C, D), (D, C), (D, D), (C, C), (C, D)] expected_dist = [ Counter( @@ -93,17 +119,25 @@ def test_compute_state_to_action_distribution(self): ((D, D), C): 1, } ), - Counter({((C, C), D): 1, ((C, D), C): 2, ((D, C), D): 2, ((D, D), C): 1}), + Counter( + {((C, C), D): 1, ((C, D), C): 2, ((D, C), D): 2, ((D, D), C): 1} + ), ] - self.assertEqual(expected_dist, axl.interaction_utils.compute_state_to_action_distribution(inter)) + self.assertEqual( + expected_dist, + axl.interaction_utils.compute_state_to_action_distribution(inter), + ) def 
test_compute_normalised_state_to_action_distribution(self): for inter, dist in zip( self.interactions, self.normalised_state_to_action_distribution ): self.assertEqual( - dist, axl.interaction_utils.compute_normalised_state_to_action_distribution(inter) + dist, + axl.interaction_utils.compute_normalised_state_to_action_distribution( + inter + ), ) inter = [(C, D), (D, C), (C, D), (D, C), (D, D), (C, C), (C, D)] expected_dist = [ @@ -116,15 +150,22 @@ def test_compute_normalised_state_to_action_distribution(self): ((D, D), C): 1, } ), - Counter({((C, C), D): 1, ((C, D), C): 1, ((D, C), D): 1, ((D, D), C): 1}), + Counter( + {((C, C), D): 1, ((C, D), C): 1, ((D, C), D): 1, ((D, D), C): 1} + ), ] self.assertEqual( - expected_dist, axl.interaction_utils.compute_normalised_state_to_action_distribution(inter) + expected_dist, + axl.interaction_utils.compute_normalised_state_to_action_distribution( + inter + ), ) def test_compute_sparklines(self): for inter, spark in zip(self.interactions, self.sparklines): - self.assertEqual(spark, axl.interaction_utils.compute_sparklines(inter)) + self.assertEqual( + spark, axl.interaction_utils.compute_sparklines(inter) + ) def test_read_interactions_from_file(self): tmp_file = tempfile.NamedTemporaryFile(mode="w", delete=False) @@ -137,10 +178,14 @@ def test_read_interactions_from_file(self): (0, 1): [[(C, D), (C, D)] for _ in range(3)], (1, 1): [[(D, D), (D, D)] for _ in range(3)], } - interactions = axl.interaction_utils.read_interactions_from_file(tmp_file.name, progress_bar=False) + interactions = axl.interaction_utils.read_interactions_from_file( + tmp_file.name, progress_bar=False + ) self.assertEqual(expected_interactions, interactions) def test_string_to_interactions(self): string = "CDCDDD" interactions = [(C, D), (C, D), (D, D)] - self.assertEqual(axl.interaction_utils.string_to_interactions(string), interactions) + self.assertEqual( + axl.interaction_utils.string_to_interactions(string), interactions + ) diff --git a/axelrod/tests/unit/test_match.py b/axelrod/tests/unit/test_match.py index 84040eaa5..6012ce32f 100644 --- a/axelrod/tests/unit/test_match.py +++ b/axelrod/tests/unit/test_match.py @@ -38,7 +38,9 @@ def test_init_with_prob_end(self, prob_end, game): self.assertEqual(match.noise, 0) self.assertEqual(match.game.RPST(), game.RPST()) - self.assertEqual(match.players[0].match_attributes["length"], float("inf")) + self.assertEqual( + match.players[0].match_attributes["length"], float("inf") + ) self.assertEqual(match._cache, {}) @given( @@ -56,7 +58,9 @@ def test_init_with_prob_end_and_turns(self, turns, prob_end, game): self.assertEqual(match.noise, 0) self.assertEqual(match.game.RPST(), game.RPST()) - self.assertEqual(match.players[0].match_attributes["length"], float("inf")) + self.assertEqual( + match.players[0].match_attributes["length"], float("inf") + ) self.assertEqual(match._cache, {}) def test_default_init(self): @@ -83,7 +87,9 @@ def test_example_prob_end(self): expected_lengths = [2, 1, 1] for seed, expected_length in zip(range(3), expected_lengths): match = axl.Match((p1, p2), prob_end=0.5, seed=seed) - self.assertEqual(match.players[0].match_attributes["length"], float("inf")) + self.assertEqual( + match.players[0].match_attributes["length"], float("inf") + ) self.assertEqual(len(match.play()), expected_length) self.assertEqual(match.noise, 0) self.assertEqual(match.game.RPST(), (3, 1, 0, 5)) @@ -117,7 +123,7 @@ def test_len_error(self): with self.assertRaises(TypeError): len(match) - @given(p=floats(min_value=1e-10, 
max_value=1-1e-10)) + @given(p=floats(min_value=1e-10, max_value=1 - 1e-10)) def test_stochastic(self, p): p1, p2 = axl.Cooperator(), axl.Cooperator() match = axl.Match((p1, p2), 5) @@ -130,7 +136,7 @@ def test_stochastic(self, p): match = axl.Match((p1, p2), 5) self.assertTrue(match._stochastic) - @given(p=floats(min_value=1e-10, max_value=1-1e-10)) + @given(p=floats(min_value=1e-10, max_value=1 - 1e-10)) def test_cache_update_required(self, p): p1, p2 = axl.Cooperator(), axl.Cooperator() match = axl.Match((p1, p2), 5, noise=p) @@ -180,8 +186,7 @@ def test_cache_grows(self): self.assertEqual(match.play(), expected_result_5_turn) # The cache should now hold the 5-turn result.. self.assertEqual( - cache[(axl.Cooperator(), axl.Defector())], - expected_result_5_turn + cache[(axl.Cooperator(), axl.Defector())], expected_result_5_turn ) def test_cache_doesnt_shrink(self): @@ -200,8 +205,7 @@ def test_cache_doesnt_shrink(self): self.assertEqual(match.play(), expected_result_3_turn) # The cache should still hold the 5. self.assertEqual( - cache[(axl.Cooperator(), axl.Defector())], - expected_result_5_turn + cache[(axl.Cooperator(), axl.Defector())], expected_result_5_turn ) def test_scores(self): @@ -361,7 +365,9 @@ def test_sample_length(self): ]: rng = RandomGenerator(seed) r = rng.random() - self.assertEqual(axl.match.sample_length(prob_end, r), expected_length) + self.assertEqual( + axl.match.sample_length(prob_end, r), expected_length + ) def test_sample_with_0_prob(self): self.assertEqual(axl.match.sample_length(0, 0.4), float("inf")) diff --git a/axelrod/tests/unit/test_match_generator.py b/axelrod/tests/unit/test_match_generator.py index e91a7eba8..730928216 100644 --- a/axelrod/tests/unit/test_match_generator.py +++ b/axelrod/tests/unit/test_match_generator.py @@ -148,7 +148,9 @@ def test_build_single_match_params_with_fixed_length_unknown(self): self.assertEqual(match_params["game"], test_game) self.assertEqual(match_params["prob_end"], None) self.assertEqual(match_params["noise"], 0) - self.assertEqual(match_params["match_attributes"], {"length": float("inf")}) + self.assertEqual( + match_params["match_attributes"], {"length": float("inf")} + ) # Check that can build a match players = [axl.Cooperator(), axl.Defector()] @@ -166,7 +168,7 @@ def test_build_match_chunks(self, repetitions): players=self.players, turns=test_turns, game=test_game, - repetitions=repetitions + repetitions=repetitions, ) chunks = list(rr.build_match_chunks()) match_definitions = [ @@ -177,10 +179,14 @@ def test_build_match_chunks(self, repetitions): (i, j, repetitions) for i in range(5) for j in range(i, 5) ] - self.assertEqual(sorted(match_definitions), sorted(expected_match_definitions)) + self.assertEqual( + sorted(match_definitions), sorted(expected_match_definitions) + ) - @given(repetitions=integers(min_value=1, max_value=test_repetitions), - seed=integers(min_value=1, max_value=4294967295),) + @given( + repetitions=integers(min_value=1, max_value=test_repetitions), + seed=integers(min_value=1, max_value=4294967295), + ) @settings(max_examples=5) def test_seeding_equality(self, repetitions, seed): rr1 = axl.MatchGenerator( @@ -188,7 +194,7 @@ def test_seeding_equality(self, repetitions, seed): turns=test_turns, game=test_game, repetitions=repetitions, - seed=seed + seed=seed, ) chunks1 = list(rr1.build_match_chunks()) rr2 = axl.MatchGenerator( @@ -196,7 +202,7 @@ def test_seeding_equality(self, repetitions, seed): turns=test_turns, game=test_game, repetitions=repetitions, - seed=seed + seed=seed, ) chunks2 
= list(rr2.build_match_chunks()) self.assertEqual(chunks1, chunks2) @@ -207,7 +213,7 @@ def test_seeding_inequality(self, repetitions=10): turns=test_turns, game=test_game, repetitions=repetitions, - seed=0 + seed=0, ) chunks1 = list(rr1.build_match_chunks()) rr2 = axl.MatchGenerator( @@ -215,7 +221,7 @@ def test_seeding_inequality(self, repetitions=10): turns=test_turns, game=test_game, repetitions=repetitions, - seed=1 + seed=1, ) chunks2 = list(rr2.build_match_chunks()) self.assertNotEqual(chunks1, chunks2) @@ -239,7 +245,9 @@ def test_spatial_build_match_chunks(self, repetitions): ] expected_match_definitions = [(i, j, repetitions) for i, j in cycle] - self.assertEqual(sorted(match_definitions), sorted(expected_match_definitions)) + self.assertEqual( + sorted(match_definitions), sorted(expected_match_definitions) + ) def test_len(self): turns = 5 diff --git a/axelrod/tests/unit/test_moran.py b/axelrod/tests/unit/test_moran.py index 8018683c6..3d5e02734 100644 --- a/axelrod/tests/unit/test_moran.py +++ b/axelrod/tests/unit/test_moran.py @@ -23,16 +23,21 @@ def test_init(self): self.assertEqual(mp.noise, 0) self.assertEqual(mp.initial_players, players) self.assertEqual(mp.players, list(players)) - self.assertEqual(mp.populations, [Counter({"Cooperator": 1, "Defector": 1})]) + self.assertEqual( + mp.populations, [Counter({"Cooperator": 1, "Defector": 1})] + ) self.assertIsNone(mp.winning_strategy_name) self.assertEqual(mp.mutation_rate, 0) self.assertEqual(mp.mode, "bd") self.assertEqual(mp.deterministic_cache, axl.DeterministicCache()) self.assertEqual( - mp.mutation_targets, {"Cooperator": [players[1]], "Defector": [players[0]]} + mp.mutation_targets, + {"Cooperator": [players[1]], "Defector": [players[0]]}, ) self.assertEqual(mp.interaction_graph._edges, [(0, 1), (1, 0)]) - self.assertEqual(mp.reproduction_graph._edges, [(0, 1), (1, 0), (0, 0), (1, 1)]) + self.assertEqual( + mp.reproduction_graph._edges, [(0, 1), (1, 0), (0, 0), (1, 1)] + ) self.assertEqual(mp.fitness_transformation, None) self.assertEqual(mp.locations, [0, 1]) self.assertEqual(mp.index, {0: 0, 1: 1}) @@ -48,7 +53,9 @@ def test_init(self): sorted([(0, 1), (2, 0), (1, 2), (0, 0), (1, 1), (2, 2)]), ) - mp = axl.MoranProcess(players, interaction_graph=graph, reproduction_graph=graph) + mp = axl.MoranProcess( + players, interaction_graph=graph, reproduction_graph=graph + ) self.assertEqual(mp.interaction_graph._edges, [(0, 1), (2, 0), (1, 2)]) self.assertEqual(mp.reproduction_graph._edges, [(0, 1), (2, 0), (1, 2)]) @@ -225,8 +232,12 @@ def test_two_random_players(self): def test_two_players_with_mutation(self): p1, p2 = axl.Cooperator(), axl.Defector() - mp = MoranProcess((p1, p2), mutation_rate=0.2, stop_on_fixation=False, seed=5) - self.assertDictEqual(mp.mutation_targets, {str(p1): [p2], str(p2): [p1]}) + mp = MoranProcess( + (p1, p2), mutation_rate=0.2, stop_on_fixation=False, seed=5 + ) + self.assertDictEqual( + mp.mutation_targets, {str(p1): [p2], str(p2): [p1]} + ) # Test that mutation causes the population to alternate between # fixations counters = [ @@ -262,7 +273,9 @@ def test_three_players_with_mutation(self): p2 = axl.Random() p3 = axl.Defector() players = [p1, p2, p3] - mp = axl.MoranProcess(players, mutation_rate=0.2, stop_on_fixation=False) + mp = axl.MoranProcess( + players, mutation_rate=0.2, stop_on_fixation=False + ) self.assertDictEqual( mp.mutation_targets, {str(p1): [p3, p2], str(p2): [p1, p3], str(p3): [p1, p2]}, @@ -373,49 +386,61 @@ def test_cooperator_can_win_with_fitness_transformation(self): w 
= 0.95 fitness_transformation = lambda score: 1 - w + w * score mp = MoranProcess( - players, turns=10, fitness_transformation=fitness_transformation, - seed=3419 + players, + turns=10, + fitness_transformation=fitness_transformation, + seed=3419, ) populations = mp.play() self.assertEqual(mp.winning_strategy_name, "Cooperator") def test_atomic_mutation_fsm(self): - players = [axl.EvolvableFSMPlayer(num_states=2, initial_state=1, initial_action=C, - seed=4) - for _ in range(5)] + players = [ + axl.EvolvableFSMPlayer( + num_states=2, initial_state=1, initial_action=C, seed=4 + ) + for _ in range(5) + ] mp = MoranProcess(players, turns=10, mutation_method="atomic", seed=12) rounds = 10 for _ in range(rounds): next(mp) self.assertEqual( list(sorted(mp.populations[-1].items()))[0][0], - 'EvolvableFSMPlayer: ((0, C, 0, C), (0, D, 1, C), (1, C, 1, D), (1, D, 1, D)), 0, D, 2, 0.1, 2240802643') + "EvolvableFSMPlayer: ((0, C, 0, C), (0, D, 1, C), (1, C, 1, D), (1, D, 1, D)), 0, D, 2, 0.1, 2240802643", + ) self.assertEqual(len(mp.populations), 11) self.assertFalse(mp.fixated) def test_atomic_mutation_cycler(self): cycle_length = 5 - players = [axl.EvolvableCycler(cycle_length=cycle_length, seed=4) - for _ in range(5)] + players = [ + axl.EvolvableCycler(cycle_length=cycle_length, seed=4) + for _ in range(5) + ] mp = MoranProcess(players, turns=10, mutation_method="atomic", seed=10) rounds = 10 for _ in range(rounds): next(mp) self.assertEqual( list(mp.populations[-1].items())[0], - ('EvolvableCycler: CCDDD, 5, 0.2, 1, 1164244177', 1)) + ("EvolvableCycler: CCDDD, 5, 0.2, 1, 1164244177", 1), + ) self.assertEqual(len(mp.populations), 11) self.assertFalse(mp.fixated) def test_mutation_method_exceptions(self): cycle_length = 5 - players = [axl.EvolvableCycler(cycle_length=cycle_length, seed=4) - for _ in range(5)] + players = [ + axl.EvolvableCycler(cycle_length=cycle_length, seed=4) + for _ in range(5) + ] with self.assertRaises(ValueError): MoranProcess(players, turns=10, mutation_method="random", seed=10) - players = [axl.Cycler(cycle="CD" * random.randint(2, 10)) - for _ in range(10)] + players = [ + axl.Cycler(cycle="CD" * random.randint(2, 10)) for _ in range(10) + ] mp = MoranProcess(players, turns=10, mutation_method="atomic", seed=53) with self.assertRaises(TypeError): for _ in range(10): @@ -442,9 +467,12 @@ def test_complete(self): mp = MoranProcess(players, seed=seed) mp.play() winner = mp.winning_strategy_name - mp = MoranProcess(players, interaction_graph=interaction_graph, - reproduction_graph=reproduction_graph, - seed=seed) + mp = MoranProcess( + players, + interaction_graph=interaction_graph, + reproduction_graph=reproduction_graph, + seed=seed, + ) mp.play() winner2 = mp.winning_strategy_name self.assertEqual(winner, winner2) @@ -483,14 +511,18 @@ def test_asymmetry(self): players.append(axl.Defector()) for seed, outcome in seeds: mp = MoranProcess( - players, interaction_graph=graph1, reproduction_graph=graph2, - seed=seed + players, + interaction_graph=graph1, + reproduction_graph=graph2, + seed=seed, ) mp.play() winner = mp.winning_strategy_name mp = MoranProcess( - players, interaction_graph=graph2, reproduction_graph=graph1, - seed=seed + players, + interaction_graph=graph2, + reproduction_graph=graph1, + seed=seed, ) mp.play() winner2 = mp.winning_strategy_name @@ -508,10 +540,14 @@ def test_cycle_death_birth(self): for _ in range(N // 2): players.append(axl.Defector()) for seed, outcome in seeds: - mp = MoranProcess(players, interaction_graph=graph, mode="bd", seed=seed) + mp = 
MoranProcess( + players, interaction_graph=graph, mode="bd", seed=seed + ) mp.play() winner = mp.winning_strategy_name - mp = MoranProcess(players, interaction_graph=graph, mode="db", seed=seed) + mp = MoranProcess( + players, interaction_graph=graph, mode="db", seed=seed + ) mp.play() winner2 = mp.winning_strategy_name self.assertEqual((winner == winner2), outcome) @@ -541,7 +577,11 @@ def test_init(self): """Test the initialisation process""" self.assertEqual( set(self.amp.cached_outcomes.keys()), - {("Cooperator", "Defector"), ("Cooperator", "Cooperator"), ("Defector", "Defector")}, + { + ("Cooperator", "Defector"), + ("Cooperator", "Cooperator"), + ("Defector", "Defector"), + }, ) self.assertEqual(self.amp.players, self.players) self.assertEqual(self.amp.turns, 0) diff --git a/axelrod/tests/unit/test_pickling.py b/axelrod/tests/unit/test_pickling.py index 29f25d645..dff35ba96 100644 --- a/axelrod/tests/unit/test_pickling.py +++ b/axelrod/tests/unit/test_pickling.py @@ -10,7 +10,9 @@ # First set: special cases -PointerToWrappedStrategy = axl.strategy_transformers.FlipTransformer()(axl.strategy_transformers.FlipTransformer()(axl.Cooperator)) +PointerToWrappedStrategy = axl.strategy_transformers.FlipTransformer()( + axl.strategy_transformers.FlipTransformer()(axl.Cooperator) +) class MyDefector(axl.Player): @@ -21,8 +23,10 @@ def strategy(self, opponent): return D -PointerToWrappedClassNotInStrategies = axl.strategy_transformers.FlipTransformer()( - axl.strategy_transformers.FlipTransformer()(MyDefector) +PointerToWrappedClassNotInStrategies = ( + axl.strategy_transformers.FlipTransformer()( + axl.strategy_transformers.FlipTransformer()(MyDefector) + ) ) @@ -111,7 +115,9 @@ class JossAnn(axl.Cooperator): probability = [0.2, 0.3] -@axl.strategy_transformers.MixedTransformer(probability, strategies, name_prefix=None) +@axl.strategy_transformers.MixedTransformer( + probability, strategies, name_prefix=None +) class Mixed(axl.Cooperator): pass @@ -159,7 +165,9 @@ def __init__(self): super().__init__(team=team) -TransformedMetaThue = axl.strategy_transformers.IdentityTransformer(name_prefix=None)(MetaThue) +TransformedMetaThue = axl.strategy_transformers.IdentityTransformer( + name_prefix=None +)(MetaThue) transformed_no_prefix = [ @@ -233,12 +241,24 @@ def test_parameterized_player(self): self.assert_original_equals_pickled(player) def test_sequence_player(self): - inline_transformed_thue = axl.strategy_transformers.IdentityTransformer(name_prefix="Transformed")(axl.ThueMorse)() - for player in [axl.ThueMorse(), axl.ThueMorseInverse(), MetaThue(), TransformedMetaThue(), - inline_transformed_thue, TransformedThue(), - ]: + inline_transformed_thue = axl.strategy_transformers.IdentityTransformer( + name_prefix="Transformed" + )(axl.ThueMorse)() + for player in [ + axl.ThueMorse(), + axl.ThueMorseInverse(), + MetaThue(), + TransformedMetaThue(), + inline_transformed_thue, + TransformedThue(), + ]: self.assert_equals_instance_from_pickling(player) - opponents = (axl.Defector, axl.Cooperator, axl.Random, axl.CyclerCCCDCD) + opponents = ( + axl.Defector, + axl.Cooperator, + axl.Random, + axl.CyclerCCCDCD, + ) for opponent_class in opponents: player.reset() opponent = opponent_class() @@ -271,9 +291,13 @@ def test_pickling_all_transformers_as_instance_called_on_a_class(self): self.assert_original_equals_pickled(player) def test_created_on_the_spot_multiple_transformers(self): - player_class = axl.strategy_transformers.FlipTransformer()(axl.Cooperator) + player_class = 
axl.strategy_transformers.FlipTransformer()( + axl.Cooperator + ) player_class = axl.strategy_transformers.DualTransformer()(player_class) - player = axl.strategy_transformers.FinalTransformer((C, D))(player_class)() + player = axl.strategy_transformers.FinalTransformer((C, D))( + player_class + )() self.assert_original_equals_pickled(player) @@ -290,9 +314,13 @@ def test_dual_transformer_regression_test(self): player_class = axl.WinStayLoseShift player_class = axl.strategy_transformers.DualTransformer()(player_class) - player_class = axl.strategy_transformers.InitialTransformer((C, D))(player_class) + player_class = axl.strategy_transformers.InitialTransformer((C, D))( + player_class + ) player_class = axl.strategy_transformers.DualTransformer()(player_class) - player_class = axl.strategy_transformers.TrackHistoryTransformer()(player_class) + player_class = axl.strategy_transformers.TrackHistoryTransformer()( + player_class + ) interspersed_dual_transformers = player_class() @@ -314,7 +342,8 @@ def test_class_and_instance_name_different_built_from_player_class(self): player = MyCooperator() class_names = [class_.__name__ for class_ in MyCooperator.mro()] self.assertEqual( - class_names, ["FlippedMyCooperator", "MyCooperator", "Player", "object"] + class_names, + ["FlippedMyCooperator", "MyCooperator", "Player", "object"], ) self.assert_original_equals_pickled(player) @@ -374,17 +403,23 @@ def test_with_various_name_prefixes(self): self.assertEqual(no_prefix.__class__.__name__, "Flip") self.assert_original_equals_pickled(no_prefix) - default_prefix = axl.strategy_transformers.FlipTransformer()(axl.Cooperator)() + default_prefix = axl.strategy_transformers.FlipTransformer()( + axl.Cooperator + )() self.assertEqual(default_prefix.__class__.__name__, "FlippedCooperator") self.assert_original_equals_pickled(default_prefix) - fliptastic = axl.strategy_transformers.FlipTransformer(name_prefix="Fliptastic") + fliptastic = axl.strategy_transformers.FlipTransformer( + name_prefix="Fliptastic" + ) new_prefix = fliptastic(axl.Cooperator)() self.assertEqual(new_prefix.__class__.__name__, "FliptasticCooperator") self.assert_original_equals_pickled(new_prefix) def test_dynamic_class_no_name_prefix(self): - player = axl.strategy_transformers.FlipTransformer(name_prefix=None)(axl.Cooperator)() + player = axl.strategy_transformers.FlipTransformer(name_prefix=None)( + axl.Cooperator + )() self.assertEqual(player.__class__.__name__, "Cooperator") self.assert_original_equals_pickled(player) diff --git a/axelrod/tests/unit/test_plot.py b/axelrod/tests/unit/test_plot.py index 847419035..f5f621c55 100644 --- a/axelrod/tests/unit/test_plot.py +++ b/axelrod/tests/unit/test_plot.py @@ -32,7 +32,11 @@ def setUpClass(cls): [3 / 2 for _ in range(3)], ] cls.expected_boxplot_xticks_locations = [1, 2, 3, 4] - cls.expected_boxplot_xticks_labels = ["Defector", "Tit For Tat", "Alternator"] + cls.expected_boxplot_xticks_labels = [ + "Defector", + "Tit For Tat", + "Alternator", + ] cls.expected_lengthplot_dataset = [ [cls.turns for _ in range(3)], @@ -41,9 +45,21 @@ def setUpClass(cls): ] cls.expected_payoff_dataset = [ - [0, mean([9 / 5 for _ in range(3)]), mean([17 / 5 for _ in range(3)])], - [mean([4 / 5 for _ in range(3)]), 0, mean([13 / 5 for _ in range(3)])], - [mean([2 / 5 for _ in range(3)]), mean([13 / 5 for _ in range(3)]), 0], + [ + 0, + mean([9 / 5 for _ in range(3)]), + mean([17 / 5 for _ in range(3)]), + ], + [ + mean([4 / 5 for _ in range(3)]), + 0, + mean([13 / 5 for _ in range(3)]), + ], + [ + mean([2 / 5 for 
_ in range(3)]), + mean([13 / 5 for _ in range(3)]), + 0, + ], ] cls.expected_winplot_dataset = ( [[2, 2, 2], [0, 0, 0], [0, 0, 0]], @@ -104,12 +120,15 @@ def test_init_from_resulsetfromfile(self): def test_boxplot_dataset(self): plot = axl.Plot(self.test_result_set) - self.assertSequenceEqual(plot._boxplot_dataset, self.expected_boxplot_dataset) + self.assertSequenceEqual( + plot._boxplot_dataset, self.expected_boxplot_dataset + ) def test_boxplot_xticks_locations(self): plot = axl.Plot(self.test_result_set) self.assertEqual( - plot._boxplot_xticks_locations, self.expected_boxplot_xticks_locations + plot._boxplot_xticks_locations, + self.expected_boxplot_xticks_locations, ) def test_boxplot_xticks_labels(self): @@ -145,7 +164,9 @@ def test_boxplot_with_title(self): def test_winplot_dataset(self): plot = axl.Plot(self.test_result_set) - self.assertSequenceEqual(plot._winplot_dataset, self.expected_winplot_dataset) + self.assertSequenceEqual( + plot._winplot_dataset, self.expected_winplot_dataset + ) def test_winplot(self): plot = axl.Plot(self.test_result_set) @@ -155,7 +176,9 @@ def test_winplot(self): def test_sdvplot_dataset(self): plot = axl.Plot(self.test_result_set) - self.assertSequenceEqual(plot._sdv_plot_dataset, self.expected_sdvplot_dataset) + self.assertSequenceEqual( + plot._sdv_plot_dataset, self.expected_sdvplot_dataset + ) def test_sdvplot(self): plot = axl.Plot(self.test_result_set) @@ -165,7 +188,9 @@ def test_sdvplot(self): def test_lengthplot_dataset(self): plot = axl.Plot(self.test_result_set) - self.assertSequenceEqual(plot._winplot_dataset, self.expected_winplot_dataset) + self.assertSequenceEqual( + plot._winplot_dataset, self.expected_winplot_dataset + ) def test_lengthplot(self): plot = axl.Plot(self.test_result_set) @@ -181,7 +206,9 @@ def test_pdplot(self): def test_payoff_dataset(self): plot = axl.Plot(self.test_result_set) - self.assertSequenceEqual(plot._payoff_dataset, self.expected_payoff_dataset) + self.assertSequenceEqual( + plot._payoff_dataset, self.expected_payoff_dataset + ) def test_payoff(self): plot = axl.Plot(self.test_result_set) @@ -250,6 +277,8 @@ def test_all_plots(self): ) self.assertIsNone( plot.save_all_plots( - prefix="test_outputs/", title_prefix="A prefix", progress_bar=True + prefix="test_outputs/", + title_prefix="A prefix", + progress_bar=True, ) ) diff --git a/axelrod/tests/unit/test_property.py b/axelrod/tests/unit/test_property.py index ae992a035..945aae5be 100644 --- a/axelrod/tests/unit/test_property.py +++ b/axelrod/tests/unit/test_property.py @@ -12,7 +12,9 @@ ) from hypothesis import given, settings -stochastic_strategies = [s for s in axl.strategies if axl.Classifiers["stochastic"](s())] +stochastic_strategies = [ + s for s in axl.strategies if axl.Classifiers["stochastic"](s()) +] class TestStrategyList(unittest.TestCase): @@ -130,7 +132,11 @@ def test_decorator(self, tournament): self.assertLessEqual(tournament.repetitions, 50) self.assertGreaterEqual(tournament.repetitions, 2) - @given(tournament=prob_end_tournaments(strategies=axl.basic_strategies, max_size=3)) + @given( + tournament=prob_end_tournaments( + strategies=axl.basic_strategies, max_size=3 + ) + ) @settings(max_examples=5) def test_decorator_with_given_strategies(self, tournament): self.assertIsInstance(tournament, axl.Tournament) @@ -165,7 +171,11 @@ def test_decorator(self, tournament): self.assertLessEqual(tournament.repetitions, 50) self.assertGreaterEqual(tournament.repetitions, 2) - @given(tournament=spatial_tournaments(strategies=axl.basic_strategies, 
max_size=3)) + @given( + tournament=spatial_tournaments( + strategies=axl.basic_strategies, max_size=3 + ) + ) @settings(max_examples=5) def test_decorator_with_given_strategies(self, tournament): self.assertIsInstance(tournament, axl.Tournament) diff --git a/axelrod/tests/unit/test_random_.py b/axelrod/tests/unit/test_random_.py index 19a84c2b0..e64ee5536 100644 --- a/axelrod/tests/unit/test_random_.py +++ b/axelrod/tests/unit/test_random_.py @@ -76,7 +76,7 @@ class TestPdf(unittest.TestCase): def test_init(self): self.assertEqual(set(self.pdf.sample_space), set(self.counter.keys())) - self.assertEqual(set(self.pdf.counts), set([4, 12, 2, 15])) + self.assertEqual(set(self.pdf.counts), {4, 12, 2, 15}) self.assertEqual(self.pdf.total, sum([4, 12, 2, 15])) self.assertAlmostEqual(sum(self.pdf.probability), 1) diff --git a/axelrod/tests/unit/test_resultset.py b/axelrod/tests/unit/test_resultset.py index 97563d3e9..bb0aa2091 100644 --- a/axelrod/tests/unit/test_resultset.py +++ b/axelrod/tests/unit/test_resultset.py @@ -71,15 +71,39 @@ def setUpClass(cls): # Recalculating to deal with numeric imprecision cls.expected_payoff_matrix = [ - [0, mean([13 / 5 for _ in range(3)]), mean([2 / 5 for _ in range(3)])], - [mean([13 / 5 for _ in range(3)]), 0, mean([4 / 5 for _ in range(3)])], - [mean([17 / 5 for _ in range(3)]), mean([9 / 5 for _ in range(3)]), 0], + [ + 0, + mean([13 / 5 for _ in range(3)]), + mean([2 / 5 for _ in range(3)]), + ], + [ + mean([13 / 5 for _ in range(3)]), + 0, + mean([4 / 5 for _ in range(3)]), + ], + [ + mean([17 / 5 for _ in range(3)]), + mean([9 / 5 for _ in range(3)]), + 0, + ], ] cls.expected_payoff_stddevs = [ - [0, std([13 / 5 for _ in range(3)]), std([2 / 5 for _ in range(3)])], - [std([13 / 5 for _ in range(3)]), 0, std([4 / 5 for _ in range(3)])], - [std([17 / 5 for _ in range(3)]), std([9 / 5 for _ in range(3)]), 0], + [ + 0, + std([13 / 5 for _ in range(3)]), + std([2 / 5 for _ in range(3)]), + ], + [ + std([13 / 5 for _ in range(3)]), + 0, + std([4 / 5 for _ in range(3)]), + ], + [ + std([17 / 5 for _ in range(3)]), + std([9 / 5 for _ in range(3)]), + 0, + ], ] cls.expected_cooperation = [[0, 9, 9], [9, 0, 3], [0, 0, 0]] @@ -88,8 +112,16 @@ def setUpClass(cls): cls.expected_initial_cooperation_rate = [1, 1, 0] cls.expected_normalised_cooperation = [ - [0, mean([3 / 5 for _ in range(3)]), mean([3 / 5 for _ in range(3)])], - [mean([3 / 5 for _ in range(3)]), 0, mean([1 / 5 for _ in range(3)])], + [ + 0, + mean([3 / 5 for _ in range(3)]), + mean([3 / 5 for _ in range(3)]), + ], + [ + mean([3 / 5 for _ in range(3)]), + 0, + mean([1 / 5 for _ in range(3)]), + ], [0, 0, 0], ] @@ -176,7 +208,11 @@ def setUpClass(cls): cls.expected_good_partner_rating = [1.0, 1.0, 0] - cls.expected_eigenjesus_rating = [0.5547001962252291, 0.8320502943378436, 0.0] + cls.expected_eigenjesus_rating = [ + 0.5547001962252291, + 0.8320502943378436, + 0.0, + ] cls.expected_eigenmoses_rating = [ -0.4578520302117101, @@ -337,7 +373,9 @@ def test_score_diffs(self): for i, row in enumerate(rs.score_diffs): for j, col in enumerate(row): for k, score in enumerate(col): - self.assertAlmostEqual(score, self.expected_score_diffs[i][j][k]) + self.assertAlmostEqual( + score, self.expected_score_diffs[i][j][k] + ) def test_payoff_diffs_means(self): rs = axl.ResultSet( @@ -347,7 +385,9 @@ def test_payoff_diffs_means(self): self.assertEqual(len(rs.payoff_diffs_means), rs.num_players) for i, row in enumerate(rs.payoff_diffs_means): for j, col in enumerate(row): - self.assertAlmostEqual(col, 
self.expected_payoff_diffs_means[i][j]) + self.assertAlmostEqual( + col, self.expected_payoff_diffs_means[i][j] + ) def test_payoff_stddevs(self): rs = axl.ResultSet( @@ -372,7 +412,8 @@ def test_initial_cooperation_count(self): self.assertIsInstance(rs.initial_cooperation_count, list) self.assertEqual(len(rs.initial_cooperation_count), rs.num_players) self.assertEqual( - rs.initial_cooperation_count, self.expected_initial_cooperation_count + rs.initial_cooperation_count, + self.expected_initial_cooperation_count, ) def test_normalised_cooperation(self): @@ -383,7 +424,9 @@ def test_normalised_cooperation(self): self.assertEqual(len(rs.normalised_cooperation), rs.num_players) for i, row in enumerate(rs.normalised_cooperation): for j, col in enumerate(row): - self.assertAlmostEqual(col, self.expected_normalised_cooperation[i][j]) + self.assertAlmostEqual( + col, self.expected_normalised_cooperation[i][j] + ) def test_initial_cooperation_rate(self): rs = axl.ResultSet( @@ -401,7 +444,9 @@ def test_state_distribution(self): ) self.assertIsInstance(rs.state_distribution, list) self.assertEqual(len(rs.state_distribution), rs.num_players) - self.assertEqual(rs.state_distribution, self.expected_state_distribution) + self.assertEqual( + rs.state_distribution, self.expected_state_distribution + ) def test_state_normalised_distribution(self): rs = axl.ResultSet( @@ -446,7 +491,9 @@ def test_vengeful_cooperation(self): self.assertEqual(len(rs.vengeful_cooperation), rs.num_players) for i, row in enumerate(rs.vengeful_cooperation): for j, col in enumerate(row): - self.assertAlmostEqual(col, self.expected_vengeful_cooperation[i][j]) + self.assertAlmostEqual( + col, self.expected_vengeful_cooperation[i][j] + ) def test_cooperating_rating(self): rs = axl.ResultSet( @@ -454,7 +501,9 @@ def test_cooperating_rating(self): ) self.assertIsInstance(rs.cooperating_rating, list) self.assertEqual(len(rs.cooperating_rating), rs.num_players) - self.assertEqual(rs.cooperating_rating, self.expected_cooperating_rating) + self.assertEqual( + rs.cooperating_rating, self.expected_cooperating_rating + ) def test_good_partner_matrix(self): rs = axl.ResultSet( @@ -462,7 +511,9 @@ def test_good_partner_matrix(self): ) self.assertIsInstance(rs.good_partner_matrix, list) self.assertEqual(len(rs.good_partner_matrix), rs.num_players) - self.assertEqual(rs.good_partner_matrix, self.expected_good_partner_matrix) + self.assertEqual( + rs.good_partner_matrix, self.expected_good_partner_matrix + ) def test_good_partner_rating(self): rs = axl.ResultSet( @@ -470,7 +521,9 @@ def test_good_partner_rating(self): ) self.assertIsInstance(rs.good_partner_rating, list) self.assertEqual(len(rs.good_partner_rating), rs.num_players) - self.assertEqual(rs.good_partner_rating, self.expected_good_partner_rating) + self.assertEqual( + rs.good_partner_rating, self.expected_good_partner_rating + ) def test_eigenjesus_rating(self): rs = axl.ResultSet( @@ -502,7 +555,10 @@ def test_self_interaction_for_random_strategies(self): def test_equality(self): rs_sets = [ axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False + self.filename, + self.players, + self.repetitions, + progress_bar=False, ) for _ in range(2) ] @@ -532,25 +588,34 @@ def test_summarise(self): [float(player.Median_score) for player in sd], ranked_median_scores ) - ranked_cooperation_rating = [rs.cooperating_rating[i] for i in rs.ranking] + ranked_cooperation_rating = [ + rs.cooperating_rating[i] for i in rs.ranking + ] self.assertEqual( 
[float(player.Cooperation_rating) for player in sd], ranked_cooperation_rating, ) ranked_median_wins = [nanmedian(rs.wins[i]) for i in rs.ranking] - self.assertEqual([float(player.Wins) for player in sd], ranked_median_wins) + self.assertEqual( + [float(player.Wins) for player in sd], ranked_median_wins + ) ranked_initial_coop_rates = [ self.expected_initial_cooperation_rate[i] for i in rs.ranking ] self.assertEqual( - [float(player.Initial_C_rate) for player in sd], ranked_initial_coop_rates + [float(player.Initial_C_rate) for player in sd], + ranked_initial_coop_rates, ) for player in sd: self.assertEqual( - player.CC_rate + player.CD_rate + player.DC_rate + player.DD_rate, 1 + player.CC_rate + + player.CD_rate + + player.DC_rate + + player.DD_rate, + 1, ) for rate in [ player.CC_to_C_rate, @@ -727,13 +792,21 @@ def setUpClass(cls): # Recalculating to deal with numeric imprecision cls.expected_payoff_matrix = [ - [0, mean([13 / 5 for _ in range(3)]), mean([2 / 5 for _ in range(3)])], + [ + 0, + mean([13 / 5 for _ in range(3)]), + mean([2 / 5 for _ in range(3)]), + ], [mean([13 / 5 for _ in range(3)]), 0, 0], [mean([17 / 5 for _ in range(3)]), 0, 0], ] cls.expected_payoff_stddevs = [ - [0, std([13 / 5 for _ in range(3)]), std([2 / 5 for _ in range(3)])], + [ + 0, + std([13 / 5 for _ in range(3)]), + std([2 / 5 for _ in range(3)]), + ], [std([13 / 5 for _ in range(3)]), 0, 0], [std([17 / 5 for _ in range(3)]), 0, 0], ] @@ -741,7 +814,11 @@ def setUpClass(cls): cls.expected_cooperation = [[0, 9, 9], [9, 0, 0], [0, 0, 0]] cls.expected_normalised_cooperation = [ - [0, mean([3 / 5 for _ in range(3)]), mean([3 / 5 for _ in range(3)])], + [ + 0, + mean([3 / 5 for _ in range(3)]), + mean([3 / 5 for _ in range(3)]), + ], [mean([3 / 5 for _ in range(3)]), 0, 0], [0, 0, 0], ] @@ -760,7 +837,11 @@ def setUpClass(cls): cls.expected_good_partner_rating = [1.0, 1.0, 0.0] - cls.expected_eigenjesus_rating = [0.447213595499958, 0.894427190999916, 0.0] + cls.expected_eigenjesus_rating = [ + 0.447213595499958, + 0.894427190999916, + 0.0, + ] cls.expected_eigenmoses_rating = [ -0.32929277996907086, @@ -784,7 +865,11 @@ def setUpClass(cls): Counter({(C, C): 0.2, (C, D): 0.4, (D, C): 0.4}), Counter({(C, D): 0.6, (D, D): 0.4}), ], - [Counter({(C, C): 0.2, (C, D): 0.4, (D, C): 0.4}), Counter(), Counter()], + [ + Counter({(C, C): 0.2, (C, D): 0.4, (D, C): 0.4}), + Counter(), + Counter(), + ], [Counter({(D, C): 0.6, (D, D): 0.4}), Counter(), Counter()], ] @@ -813,7 +898,11 @@ def setUpClass(cls): Counter(), Counter(), ], - [Counter({((D, C), D): 1.0, ((D, D), D): 1.0}), Counter(), Counter()], + [ + Counter({((D, C), D): 1.0, ((D, D), D): 1.0}), + Counter(), + Counter(), + ], ] def test_match_lengths(self): @@ -870,7 +959,8 @@ def setUpClass(cls): cls.edges = [(0, 1), (2, 3)] cls.expected_match_lengths = [ - [[0, 5, 0, 0], [5, 0, 0, 0], [0, 0, 0, 5], [0, 0, 5, 0]] for _ in range(3) + [[0, 5, 0, 0], [5, 0, 0, 0], [0, 0, 0, 5], [0, 0, 5, 0]] + for _ in range(3) ] cls.expected_scores = [ @@ -912,10 +1002,30 @@ def setUpClass(cls): ] cls.expected_score_diffs = [ - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [5.0, 5.0, 5.0]], - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [-5.0, -5.0, -5.0], [0.0, 0.0, 0.0]], + [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ], + [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ], + [ + 
[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [5.0, 5.0, 5.0], + ], + [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [-5.0, -5.0, -5.0], + [0.0, 0.0, 0.0], + ], ] cls.expected_payoff_diffs_means = [ @@ -1072,14 +1182,17 @@ def setUpClass(cls): cls.edges = [(0, 0), (1, 1), (2, 2), (3, 3)] cls.expected_match_lengths = [ - [[5, 0, 0, 0], [0, 5, 0, 0], [0, 0, 5, 0], [0, 0, 0, 5]] for _ in range(3) + [[5, 0, 0, 0], [0, 5, 0, 0], [0, 0, 5, 0], [0, 0, 0, 5]] + for _ in range(3) ] cls.expected_scores = [[0 for _ in range(3)] for _ in range(4)] cls.expected_wins = [[0 for _ in range(3)] for _ in range(4)] - cls.expected_normalised_scores = [[0 for _ in range(3)] for i in range(4)] + cls.expected_normalised_scores = [ + [0 for _ in range(3)] for i in range(4) + ] cls.expected_ranking = [0, 1, 2, 3] @@ -1107,7 +1220,9 @@ def setUpClass(cls): [[0.0 for _ in range(3)] for _ in range(4)] for _ in range(4) ] - cls.expected_payoff_diffs_means = [[0.0 for _ in range(4)] for _ in range(4)] + cls.expected_payoff_diffs_means = [ + [0.0 for _ in range(4)] for _ in range(4) + ] # Recalculating to deal with numeric imprecision cls.expected_payoff_matrix = [ @@ -1148,7 +1263,9 @@ def setUpClass(cls): cls.expected_cooperating_rating = [0.0 for _ in range(4)] - cls.expected_good_partner_matrix = [[0.0 for _ in range(4)] for _ in range(4)] + cls.expected_good_partner_matrix = [ + [0.0 for _ in range(4)] for _ in range(4) + ] cls.expected_good_partner_rating = [0.0 for _ in range(4)] @@ -1216,7 +1333,9 @@ class TestSummary(unittest.TestCase): """Separate test to check that summary always builds without failures""" @given( - tournament=tournaments(min_size=2, max_size=5, max_turns=5, max_repetitions=3) + tournament=tournaments( + min_size=2, max_size=5, max_turns=5, max_repetitions=3 + ) ) @settings(max_examples=5, deadline=None) def test_summarise_without_failure(self, tournament): @@ -1227,7 +1346,11 @@ def test_summarise_without_failure(self, tournament): for player in sd: # round for numerical error total_rate = round( - player.CC_rate + player.CD_rate + player.DC_rate + player.DD_rate, 3 + player.CC_rate + + player.CD_rate + + player.DC_rate + + player.DD_rate, + 3, ) self.assertTrue(total_rate in [0, 1]) self.assertTrue(0 <= player.Initial_C_rate <= 1) @@ -1239,9 +1362,13 @@ class TestCreateCounterDict(unittest.TestCase): def test_basic_use(self): key_map = {"Col 1": "Var 1", "Col 2": "Var 2"} df = pd.DataFrame( - {"Col 1": [10, 20, 30], "Col 2": [1, 2, 0]}, index=[[5, 6, 7], [1, 2, 3]] + {"Col 1": [10, 20, 30], "Col 2": [1, 2, 0]}, + index=[[5, 6, 7], [1, 2, 3]], + ) + self.assertEqual( + create_counter_dict(df, 6, 2, key_map), + Counter({"Var 1": 20, "Var 2": 2}), ) self.assertEqual( - create_counter_dict(df, 6, 2, key_map), Counter({"Var 1": 20, "Var 2": 2}) + create_counter_dict(df, 7, 3, key_map), Counter({"Var 1": 30}) ) - self.assertEqual(create_counter_dict(df, 7, 3, key_map), Counter({"Var 1": 30})) diff --git a/axelrod/tests/unit/test_strategy_transformers.py b/axelrod/tests/unit/test_strategy_transformers.py index 7539ae8cd..d96b58c4c 100644 --- a/axelrod/tests/unit/test_strategy_transformers.py +++ b/axelrod/tests/unit/test_strategy_transformers.py @@ -20,6 +20,7 @@ class CanNotPickle(axl.Cooperator): class TestTransformers(TestMatch): """Test generic transformer properties.""" + def test_player_can_be_pickled(self): player = axl.Cooperator() self.assertTrue(player_can_be_pickled(player)) @@ -55,9 +56,13 @@ def test_DecoratorReBuilder(self): args = decorator.args kwargs = 
decorator.kwargs.copy() - new_decorator = DecoratorReBuilder()(factory_args, args, kwargs, new_prefix) + new_decorator = DecoratorReBuilder()( + factory_args, args, kwargs, new_prefix + ) - self.assertEqual(decorator(axl.Cooperator)(), new_decorator(axl.Cooperator)()) + self.assertEqual( + decorator(axl.Cooperator)(), new_decorator(axl.Cooperator)() + ) def test_StrategyReBuilder_declared_class_with_name_prefix(self): player = CanNotPickle() @@ -151,9 +156,15 @@ def test_repr(self): str(InitialTransformer([D, D, C])(axl.Alternator)()), "Initial Alternator: [D, D, C]", ) - self.assertEqual(str(FlipTransformer()(axl.Random)(0.1)), "Flipped Random: 0.1") self.assertEqual( - str(MixedTransformer(0.3, (axl.Alternator, axl.Bully))(axl.Random)(0.1)), + str(FlipTransformer()(axl.Random)(0.1)), "Flipped Random: 0.1" + ) + self.assertEqual( + str( + MixedTransformer(0.3, (axl.Alternator, axl.Bully))(axl.Random)( + 0.1 + ) + ), "Mutated Random: 0.1: 0.3, ['Alternator', 'Bully']", ) @@ -164,8 +175,7 @@ def test_doc(self): self.assertEqual(player.__doc__, transformer.__doc__) def test_cloning(self): - """Tests that Player.clone preserves the application of transformations. - """ + """Tests that Player.clone preserves the application of transformations.""" p1 = axl.Cooperator() p2 = FlipTransformer()(axl.Cooperator)() # Defector p3 = p2.clone() @@ -181,7 +191,9 @@ def test_composition(self): p2 = axl.Cooperator() self.versus_test(p1, p2, [D, D, C, C, C, C, D, D], [C] * 8) - cls1 = FinalTransformer([D, D])(InitialTransformer([D, D])(axl.Cooperator)) + cls1 = FinalTransformer([D, D])( + InitialTransformer([D, D])(axl.Cooperator) + ) p1 = cls1() p2 = axl.Cooperator() self.versus_test(p1, p2, [D, D, C, C, C, C, D, D], [C] * 8) @@ -208,9 +220,11 @@ def deterministic_reclassifier(original_classifier, *args): return original_classifier StochasticTransformer = StrategyTransformerFactory( - generic_strategy_wrapper, reclassifier=stochastic_reclassifier) + generic_strategy_wrapper, reclassifier=stochastic_reclassifier + ) DeterministicTransformer = StrategyTransformerFactory( - generic_strategy_wrapper, reclassifier=deterministic_reclassifier) + generic_strategy_wrapper, reclassifier=deterministic_reclassifier + ) # Cooperator is not stochastic self.assertFalse(axl.Cooperator().classifier["stochastic"]) @@ -219,12 +233,16 @@ def deterministic_reclassifier(original_classifier, *args): self.assertTrue(player.classifier["stochastic"]) # Composing transforms should return it to not being stochastic - cls1 = compose_transformers(DeterministicTransformer(), StochasticTransformer()) + cls1 = compose_transformers( + DeterministicTransformer(), StochasticTransformer() + ) player = cls1(axl.Cooperator)() self.assertFalse(player.classifier["stochastic"]) # Explicit composition - player = DeterministicTransformer()(StochasticTransformer()(axl.Cooperator))() + player = DeterministicTransformer()( + StochasticTransformer()(axl.Cooperator) + )() self.assertFalse(player.classifier["stochastic"]) # Random is stochastic @@ -235,12 +253,16 @@ def deterministic_reclassifier(original_classifier, *args): self.assertFalse(player.classifier["stochastic"]) # Composing transforms should return it to being stochastic - cls1 = compose_transformers(StochasticTransformer(), DeterministicTransformer()) + cls1 = compose_transformers( + StochasticTransformer(), DeterministicTransformer() + ) player = cls1(axl.Random)() self.assertTrue(player.classifier["stochastic"]) # Explicit composition - player = 
StochasticTransformer()(DeterministicTransformer()(axl.Random))() + player = StochasticTransformer()( + DeterministicTransformer()(axl.Random) + )() self.assertTrue(player.classifier["stochastic"]) def test_nilpotency(self): @@ -316,16 +338,20 @@ def test_deadlock_breaks(self): axl.TitForTat(), InitialTransformer([D])(axl.TitForTat)(), [C, D, C, D], - [D, C, D, C]) + [D, C, D, C], + ) # Now let's use the transformer to break the deadlock to achieve # Mutual cooperation # self.versus_test( self.versus_test( axl.TitForTat(), - DeadlockBreakingTransformer()(InitialTransformer([D])(axl.TitForTat))(), + DeadlockBreakingTransformer()( + InitialTransformer([D])(axl.TitForTat) + )(), [C, D, C, C], - [D, C, C, C]) + [D, C, C, C], + ) class TestDualTransformer(TestMatch): @@ -377,10 +403,12 @@ def test_dual_transformer_simple_play_regression_test(self): DualTransformer()(axl.Cooperator) )() - self.versus_test(multiple_dual_transformers, - dual_transformer_not_first, - [D, D, D], - [D, D, D]) + self.versus_test( + multiple_dual_transformers, + dual_transformer_not_first, + [D, D, D], + [D, D, D], + ) def test_dual_transformer_multiple_interspersed_regression_test(self): """DualTransformer has failed when there were multiple DualTransformers. @@ -404,9 +432,11 @@ def test_final_transformer(self): # Final play transformer p1 = axl.Cooperator() p2 = FinalTransformer([D, D, D])(axl.Cooperator)() - self.assertEqual(axl.Classifiers["makes_use_of"](p2), set(["length"])) + self.assertEqual(axl.Classifiers["makes_use_of"](p2), {"length"}) self.assertEqual(axl.Classifiers["memory_depth"](p2), 3) - self.assertEqual(axl.Classifiers["makes_use_of"](axl.Cooperator()), set([])) + self.assertEqual( + axl.Classifiers["makes_use_of"](axl.Cooperator()), set([]) + ) self.versus_test(p1, p2, [C] * 8, [C, C, C, C, C, D, D, D], turns=8) def test_infinite_memory_depth_transformed(self): @@ -419,8 +449,9 @@ def test_final_transformer_unknown_length(self): """Tests the FinalTransformer when tournament length is not known.""" p1 = axl.Defector() p2 = FinalTransformer([D, D])(axl.Cooperator)() - self.versus_test(p1, p2, [D] * 6, [C] * 6, - match_attributes={"length": -1}) + self.versus_test( + p1, p2, [D] * 6, [C] * 6, match_attributes={"length": -1} + ) class TestFlipTransformer(TestMatch): @@ -449,8 +480,9 @@ def test_forgiving_transformer(self): p1 = ForgiverTransformer(0.5)(axl.Alternator)() p2 = axl.Defector() turns = 10 - self.versus_test(p1, p2, [C, D, C, C, D, C, C, D, C, D], [D] * turns, - seed=8) + self.versus_test( + p1, p2, [C, D, C, C, D, C, C, D, C, D], [D] * turns, seed=8 + ) def test_stochastic_values_classifier(self): p1 = ForgiverTransformer(0.5)(axl.Alternator)() @@ -473,7 +505,9 @@ def test_grudging1(self): def test_grudging2(self): p1 = InitialTransformer([C])(axl.Defector)() p2 = GrudgeTransformer(2)(axl.Cooperator)() - self.versus_test(p1, p2, [C, D, D, D, D, D, D, D], [C, C, C, C, D, D, D, D], seed=11) + self.versus_test( + p1, p2, [C, D, D, D, D, D, D, D], [C, C, C, C, D, D, D, D], seed=11 + ) class TestHistoryTrackingTransformer(TestMatch): @@ -510,8 +544,12 @@ def test_generic(self): Defector2 = transformer(axl.Defector) turns = 100 - self.versus_test(axl.Cooperator(), Cooperator2(), [C] * turns, [C] * turns) - self.versus_test(axl.Cooperator(), Defector2(), [C] * turns, [D] * turns) + self.versus_test( + axl.Cooperator(), Cooperator2(), [C] * turns, [C] * turns + ) + self.versus_test( + axl.Cooperator(), Defector2(), [C] * turns, [D] * turns + ) class TestInitialTransformer(TestMatch): @@ -684,7 
+722,9 @@ def test_noisy_transformer(self): p1 = axl.Cooperator() p2 = NoisyTransformer(0.5)(axl.Cooperator)() self.assertTrue(axl.Classifiers["stochastic"](p2)) - self.versus_test(p1, p2, [C] * 10, [D, C, C, D, C, D, C, D, D, C], seed=1) + self.versus_test( + p1, p2, [C] * 10, [D, C, C, D, C, D, C, D, D, C], seed=1 + ) def test_noisy_transformation_stochastic(self): """Depending on the value of the noise parameter, the strategy may become stochastic @@ -722,7 +762,9 @@ def test_retailiating_cooperator_against_2TFT(self): TwoTitsForTat = RetaliationTransformer(2)(axl.Cooperator) p1 = TwoTitsForTat() p2 = axl.CyclerCCD() - self.versus_test(p1, p2, [C, C, C, D, D, C, D, D, C], [C, C, D, C, C, D, C, C, D]) + self.versus_test( + p1, p2, [C, C, C, D, D, C, D, D, C], [C, C, D, C, C, D, C, C, D] + ) class TestRetailiateUntilApologyTransformer(TestMatch): @@ -746,6 +788,7 @@ def test_retaliation_until_apology_stochastic(self): # Run the standard Player tests on some specifically transformed players + class TestNullInitialTransformedCooperator(TestPlayer): player = InitialTransformer([])(axl.Cooperator) name = "Initial Cooperator: []" @@ -786,7 +829,9 @@ class TestFinalTransformedCooperator(TestPlayer): class TestInitialFinalTransformedCooperator(TestPlayer): - player = InitialTransformer([D, D])(FinalTransformer([D, D, D])(axl.Cooperator)) + player = InitialTransformer([D, D])( + FinalTransformer([D, D, D])(axl.Cooperator) + ) name = "Initial Final Cooperator: [D, D, D]: [D, D]" expected_classifier = { "memory_depth": 3, @@ -799,7 +844,9 @@ class TestInitialFinalTransformedCooperator(TestPlayer): class TestFinalInitialTransformedCooperator(TestPlayer): - player = FinalTransformer([D, D])(InitialTransformer([D, D, D])(axl.Cooperator)) + player = FinalTransformer([D, D])( + InitialTransformer([D, D, D])(axl.Cooperator) + ) name = "Final Initial Cooperator: [D, D, D]: [D, D]" expected_classifier = { "memory_depth": 3, @@ -911,7 +958,9 @@ class TestMixed0(TestDefector): class TestMixed1(TestDefector): - name = "Mutated Cooperator: 1, " + name = ( + "Mutated Cooperator: 1, " + ) player = MixedTransformer(1, axl.Defector)(axl.Cooperator) expected_classifier = { "memory_depth": 0, @@ -952,20 +1001,6 @@ class TestFlippedDualTransformer(TestPlayer): } -class TestIdentityDualTransformer(TestPlayer): - name = "Dual Cooperator" - player = IdentityTransformer()(DualTransformer()(axl.Cooperator)) - expected_classifier = { - "memory_depth": 0, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - class TestDualJossAnn(TestPlayer): name = "Dual Joss-Ann Alternator: (0.2, 0.3)" player = DualTransformer()(JossAnnTransformer((0.2, 0.3))(axl.Alternator)) @@ -996,7 +1031,9 @@ class TestJossAnnDual(TestPlayer): class TestJossAnnOverwriteClassifier(TestPlayer): name = "Joss-Ann Final Random: 0.5: [D, D]: (1.0, 0.0)" - player = JossAnnTransformer((1., 0.))(FinalTransformer([D, D])(axl.Random)) + player = JossAnnTransformer((1.0, 0.0))( + FinalTransformer([D, D])(axl.Random) + ) expected_classifier = { "memory_depth": 0, "stochastic": False, diff --git a/axelrod/tests/unit/test_strategy_utils.py b/axelrod/tests/unit/test_strategy_utils.py index b3a4f7ef0..3946769f5 100644 --- a/axelrod/tests/unit/test_strategy_utils.py +++ b/axelrod/tests/unit/test_strategy_utils.py @@ -26,7 +26,7 @@ def test_finds_cycle(self, cycle, period): history = cycle * period detected = detect_cycle(history) 
self.assertIsNotNone(detected) - self.assertIn("".join(map(str, detected)), "".join(map(str, (cycle)))) + self.assertIn("".join(map(str, detected)), "".join(map(str, cycle))) def test_no_cycle(self): history = [C, D, C, C] @@ -35,7 +35,9 @@ def test_no_cycle(self): history = [D, D, C, C, C] self.assertIsNone(detect_cycle(history)) - def test_regression_test_can_detect_cycle_that_is_repeated_exactly_once(self): + def test_regression_test_can_detect_cycle_that_is_repeated_exactly_once( + self, + ): self.assertEqual(detect_cycle([C, D, C, D]), (C, D)) self.assertEqual(detect_cycle([C, D, C, D, C]), (C, D)) @@ -53,11 +55,14 @@ def test_min_size_greater_than_two_times_history_tail_returns_none(self): def test_min_size_greater_than_two_times_max_size_has_no_effect(self): self.assertEqual( - detect_cycle([C, C, C, C, C, C, C, C], min_size=2, max_size=3), (C, C) + detect_cycle([C, C, C, C, C, C, C, C], min_size=2, max_size=3), + (C, C), ) def test_cycle_greater_than_max_size_returns_none(self): - self.assertEqual(detect_cycle([C, C, D] * 2, min_size=1, max_size=3), (C, C, D)) + self.assertEqual( + detect_cycle([C, C, D] * 2, min_size=1, max_size=3), (C, C, D) + ) self.assertIsNone(detect_cycle([C, C, D] * 2, min_size=1, max_size=2)) @@ -81,7 +86,9 @@ def test_strategies_with_countermeasures_return_their_countermeasures(self): inspector = axl.Cooperator() match = axl.Match((d_geller, inspector), turns=1) match.play() - self.assertEqual(inspect_strategy(inspector=inspector, opponent=d_geller), D) + self.assertEqual( + inspect_strategy(inspector=inspector, opponent=d_geller), D + ) self.assertEqual(d_geller.strategy(inspector), C) diff --git a/axelrod/tests/unit/test_tournament.py b/axelrod/tests/unit/test_tournament.py index f8cca4bb4..18f206d85 100644 --- a/axelrod/tests/unit/test_tournament.py +++ b/axelrod/tests/unit/test_tournament.py @@ -41,7 +41,9 @@ test_edges = [(0, 1), (1, 2), (3, 4)] deterministic_strategies = [ - s for s in axl.short_run_time_strategies if not axl.Classifiers["stochastic"](s()) + s + for s in axl.short_run_time_strategies + if not axl.Classifiers["stochastic"](s()) ] @@ -107,7 +109,9 @@ def test_init(self): noise=0.2, ) self.assertEqual(len(tournament.players), len(test_strategies)) - self.assertIsInstance(tournament.players[0].match_attributes["game"], axl.Game) + self.assertIsInstance( + tournament.players[0].match_attributes["game"], axl.Game + ) self.assertEqual(tournament.game.score((C, C)), (3, 3)) self.assertEqual(tournament.turns, self.test_turns) self.assertEqual(tournament.repetitions, 10) @@ -123,7 +127,9 @@ def test_init_with_match_attributes(self): ) mg = tournament.match_generator match_params = mg.build_single_match_params() - self.assertEqual(match_params["match_attributes"], {"length": float("inf")}) + self.assertEqual( + match_params["match_attributes"], {"length": float("inf")} + ) def test_warning(self): tournament = axl.Tournament( @@ -275,7 +281,11 @@ def test_serial_play_with_different_game(self): # Test that a non default game is passed to the result set game = axl.Game(p=-1, r=-1, s=-1, t=-1) tournament = axl.Tournament( - name=self.test_name, players=self.players, game=game, turns=1, repetitions=1 + name=self.test_name, + players=self.players, + game=game, + turns=1, + repetitions=1, ) results = tournament.play(progress_bar=False) self.assertLessEqual(np.max(results.scores), 0) @@ -409,7 +419,9 @@ def test_progress_bar_play_parallel(self): # these two examples were identified by hypothesis. 
@example( tournament=axl.Tournament( - players=[axl.BackStabber(), axl.MindReader()], turns=2, repetitions=1, + players=[axl.BackStabber(), axl.MindReader()], + turns=2, + repetitions=1, ) ) @example( tournament=axl.Tournament( @@ -541,7 +553,9 @@ def test_n_workers(self): tournament._n_workers(processes=max_processes + 2), max_processes ) - @unittest.skipIf(cpu_count() < 2, "not supported on single processor machines") + @unittest.skipIf( + cpu_count() < 2, "not supported on single processor machines" + ) def test_2_workers(self): # This is a separate test with a skip condition because we # cannot guarantee that the tests will always run on a machine @@ -725,20 +739,30 @@ def test_write_to_csv_without_results(self): turns=2, repetitions=2, ) - tournament.play(filename=self.filename, progress_bar=False, build_results=False) + tournament.play( + filename=self.filename, progress_bar=False, build_results=False + ) df = pd.read_csv(self.filename) - path = pathlib.Path("test_outputs/expected_test_tournament_no_results.csv") + path = pathlib.Path( + "test_outputs/expected_test_tournament_no_results.csv" + ) expected_df = pd.read_csv(axl_filename(path)) self.assertTrue(df.equals(expected_df)) @given(seed=integers(min_value=1, max_value=4294967295)) + @example(seed=2) @settings(max_examples=5, deadline=None) def test_seeding_equality(self, seed): """Tests that a tournament with a given seed will return the same results each time. This specifically checks when running using multiple cores so as to confirm that https://github.com/Axelrod-Python/Axelrod/issues/1277 - is fixed.""" + is fixed. + + Note that the final asserts test only specific properties of the result + sets and not the entire result sets, as some floating point errors can + emerge. + """ rng = axl.RandomGenerator(seed=seed) players = [axl.Random(rng.random()) for _ in range(8)] tournament1 = axl.Tournament( @@ -747,7 +771,7 @@ def test_seeding_equality(self, seed): game=self.game, turns=10, repetitions=100, - seed=seed + seed=seed, ) tournament2 = axl.Tournament( name=self.test_name, @@ -755,12 +779,15 @@ def test_seeding_equality(self, seed): game=self.game, turns=10, repetitions=100, - seed=seed + seed=seed, ) for _ in range(4): - results1 = tournament1.play(processes=2) - results2 = tournament2.play(processes=2) - self.assertEqual(results1.ranked_names, results2.ranked_names) + results1 = tournament1.play(processes=2, progress_bar=False) + results2 = tournament2.play(processes=2, progress_bar=False) + self.assertEqual(results1.wins, results2.wins) + self.assertEqual(results1.match_lengths, results2.match_lengths) + self.assertEqual(results1.scores, results2.scores) + self.assertEqual(results1.cooperation, results2.cooperation) def test_seeding_inequality(self): players = [axl.Random(0.4), axl.Random(0.6)] @@ -770,7 +797,7 @@ def test_seeding_inequality(self): game=self.game, turns=2, repetitions=2, - seed=0 + seed=0, ) tournament2 = axl.Tournament( name=self.test_name, @@ -778,7 +805,7 @@ def test_seeding_inequality(self): game=self.game, turns=2, repetitions=2, - seed=10 + seed=10, ) results1 = tournament1.play() results2 = tournament2.play() @@ -802,7 +829,9 @@ def test_init(self): prob_end=self.test_prob_end, noise=0.2, ) - self.assertEqual(tournament.match_generator.prob_end, tournament.prob_end) + self.assertEqual( + tournament.match_generator.prob_end, tournament.prob_end + ) self.assertEqual(len(tournament.players), len(test_strategies)) self.assertEqual(tournament.game.score((C, C)), (3, 3)) self.assertIsNone(tournament.turns) @@ -821,7 +850,7 @@ def 
test_init(self): max_prob_end=0.9, min_repetitions=2, max_repetitions=4, - seed=100 + seed=100, ) ) @settings(max_examples=5, deadline=None) @@ -830,7 +859,7 @@ def test_init(self): players=[s() for s in test_strategies], prob_end=0.2, repetitions=test_repetitions, - seed=101 + seed=101, ) ) # These two examples are to make sure #465 is fixed. @@ -841,7 +870,7 @@ def test_init(self): players=[axl.BackStabber(), axl.MindReader()], prob_end=0.2, repetitions=1, - seed=102 + seed=102, ) ) @example( @@ -849,7 +878,7 @@ def test_init(self): players=[axl.ThueMorse(), axl.MindReader()], prob_end=0.2, repetitions=1, - seed=103 + seed=103, ) ) def test_property_serial_play(self, tournament): @@ -902,7 +931,9 @@ def test_init(self): seed=integers(min_value=0, max_value=4294967295), ) @settings(max_examples=5, deadline=None) - def test_complete_tournament(self, strategies, turns, repetitions, noise, seed): + def test_complete_tournament( + self, strategies, turns, repetitions, noise, seed + ): """ A test to check that a spatial tournament on the complete multigraph gives the same results as the round robin. @@ -917,13 +948,20 @@ def test_complete_tournament(self, strategies, turns, repetitions, noise, seed): # create a round robin tournament tournament = axl.Tournament( - players, repetitions=repetitions, turns=turns, noise=noise, - seed=seed + players, + repetitions=repetitions, + turns=turns, + noise=noise, + seed=seed, ) # create a complete spatial tournament spatial_tournament = axl.Tournament( - players, repetitions=repetitions, turns=turns, noise=noise, edges=edges, - seed=seed + players, + repetitions=repetitions, + turns=turns, + noise=noise, + edges=edges, + seed=seed, ) results = tournament.play(progress_bar=False) @@ -932,16 +970,23 @@ def test_complete_tournament(self, strategies, turns, repetitions, noise, seed): self.assertEqual(results.ranked_names, spatial_results.ranked_names) self.assertEqual(results.num_players, spatial_results.num_players) self.assertEqual(results.repetitions, spatial_results.repetitions) - self.assertEqual(results.payoff_diffs_means, spatial_results.payoff_diffs_means) + self.assertEqual( + results.payoff_diffs_means, spatial_results.payoff_diffs_means + ) self.assertEqual(results.payoff_matrix, spatial_results.payoff_matrix) self.assertEqual(results.payoff_stddevs, spatial_results.payoff_stddevs) self.assertEqual(results.payoffs, spatial_results.payoffs) - self.assertEqual(results.cooperating_rating, spatial_results.cooperating_rating) + self.assertEqual( + results.cooperating_rating, spatial_results.cooperating_rating + ) self.assertEqual(results.cooperation, spatial_results.cooperation) self.assertEqual( - results.normalised_cooperation, spatial_results.normalised_cooperation + results.normalised_cooperation, + spatial_results.normalised_cooperation, + ) + self.assertEqual( + results.normalised_scores, spatial_results.normalised_scores ) - self.assertEqual(results.normalised_scores, spatial_results.normalised_scores) self.assertEqual( results.good_partner_matrix, spatial_results.good_partner_matrix ) @@ -961,7 +1006,12 @@ def test_particular_tournament(self): edges = [(0, 2), (0, 3), (1, 2), (1, 3)] tournament = axl.Tournament(players, edges=edges) results = tournament.play(progress_bar=False) - expected_ranked_names = ["Cooperator", "Tit For Tat", "Grudger", "Defector"] + expected_ranked_names = [ + "Cooperator", + "Tit For Tat", + "Grudger", + "Defector", + ] self.assertEqual(results.ranked_names, expected_ranked_names) # Check that this tournament runs with 
noise @@ -1018,17 +1068,19 @@ def test_complete_tournament(self, strategies, prob_end, seed, reps): # create a prob end round robin tournament - tournament = axl.Tournament(players, prob_end=prob_end, repetitions=reps, - seed=seed) + tournament = axl.Tournament( + players, prob_end=prob_end, repetitions=reps, seed=seed + ) results = tournament.play(progress_bar=False) # create a complete spatial tournament # edges - edges = [(i, j) for i in range(len(players)) for j in range(i, len(players))] + edges = [ + (i, j) for i in range(len(players)) for j in range(i, len(players)) + ] spatial_tournament = axl.Tournament( - players, prob_end=prob_end, repetitions=reps, edges=edges, - seed=seed + players, prob_end=prob_end, repetitions=reps, edges=edges, seed=seed ) spatial_results = spatial_tournament.play(progress_bar=False) self.assertEqual(results.match_lengths, spatial_results.match_lengths) @@ -1063,7 +1115,9 @@ def test_one_turn_tournament(self, tournament, seed): one_turn_results = tournament.play(progress_bar=False) self.assertEqual(prob_end_results.scores, one_turn_results.scores) self.assertEqual(prob_end_results.wins, one_turn_results.wins) - self.assertEqual(prob_end_results.cooperation, one_turn_results.cooperation) + self.assertEqual( + prob_end_results.cooperation, one_turn_results.cooperation + ) class TestHelperFunctions(unittest.TestCase): diff --git a/axelrod/tournament.py b/axelrod/tournament.py index 925fe1477..67824c8ff 100644 --- a/axelrod/tournament.py +++ b/axelrod/tournament.py @@ -33,7 +33,7 @@ def __init__( noise: float = 0, edges: List[Tuple] = None, match_attributes: dict = None, - seed: int = None + seed: int = None, ) -> None: """ Parameters @@ -52,8 +52,6 @@ def __init__( The number of times the round robin should be repeated noise : float The probability that a player's intended action should be flipped - prob_end : float - The probability of a given turn ending a match edges : list A list of edges between players match_attributes : dict @@ -87,7 +85,7 @@ def __init__( noise=self.noise, edges=edges, match_attributes=match_attributes, - seed=self.seed + seed=self.seed, ) self._logger = logging.getLogger(__name__) @@ -97,7 +95,7 @@ def __init__( def setup_output(self, filename=None): """assign/create `filename` to `self`. If file should be deleted once - `play` is finished, assign a file descriptor. 
""" + `play` is finished, assign a file descriptor.""" temp_file_descriptor = None if filename is None: temp_file_descriptor, filename = mkstemp() @@ -232,7 +230,9 @@ def _get_file_objects(self, build_results=True): def _get_progress_bar(self): if self.use_progress_bar: - return tqdm.tqdm(total=self.match_generator.size, desc="Playing matches") + return tqdm.tqdm( + total=self.match_generator.size, desc="Playing matches" + ) return None def _write_interactions_to_file(self, results, writer): @@ -256,8 +256,14 @@ def _write_interactions_to_file(self, results, writer): ) = results for index, player_index in enumerate(index_pair): opponent_index = index_pair[index - 1] - row = [self.num_interactions, player_index, opponent_index, repetition, - str(self.players[player_index]), str(self.players[opponent_index])] + row = [ + self.num_interactions, + player_index, + opponent_index, + repetition, + str(self.players[player_index]), + str(self.players[opponent_index]), + ] history = actions_to_str([i[index] for i in interaction]) row.append(history) @@ -277,16 +283,24 @@ def _write_interactions_to_file(self, results, writer): for state in states: row.append(state_distribution[state]) for state in states: - row.append(state_to_action_distributions[index][(state, C)]) - row.append(state_to_action_distributions[index][(state, D)]) + row.append( + state_to_action_distributions[index][(state, C)] + ) + row.append( + state_to_action_distributions[index][(state, D)] + ) - row.append(int(cooperations[index] >= cooperations[index - 1])) + row.append( + int(cooperations[index] >= cooperations[index - 1]) + ) writer.writerow(row) repetition += 1 self.num_interactions += 1 - def _run_parallel(self, processes: int = 2, build_results: bool = True) -> bool: + def _run_parallel( + self, processes: int = 2, build_results: bool = True + ) -> bool: """ Run all matches in parallel @@ -349,7 +363,8 @@ def _start_workers( """ for worker in range(workers): process = Process( - target=self._worker, args=(work_queue, done_queue, build_results) + target=self._worker, + args=(work_queue, done_queue, build_results), ) work_queue.put("STOP") process.start() @@ -387,7 +402,9 @@ def _process_done_queue( _close_objects(out_file, progress_bar) return True - def _worker(self, work_queue: Queue, done_queue: Queue, build_results: bool = True): + def _worker( + self, work_queue: Queue, done_queue: Queue, build_results: bool = True + ): """ The work for each parallel sub-process to execute. @@ -455,13 +472,17 @@ def _calculate_results(self, interactions): turns = len(interactions) results.append(turns) - score_per_turns = iu.compute_final_score_per_turn(interactions, self.game) + score_per_turns = iu.compute_final_score_per_turn( + interactions, self.game + ) results.append(score_per_turns) score_diffs_per_turns = score_diffs[0] / turns, score_diffs[1] / turns results.append(score_diffs_per_turns) - initial_coops = tuple(map(bool, iu.compute_cooperations(interactions[:1]))) + initial_coops = tuple( + map(bool, iu.compute_cooperations(interactions[:1])) + ) results.append(initial_coops) cooperations = iu.compute_cooperations(interactions) diff --git a/docs/conf.py b/docs/conf.py index 72d0a774c..0e33e21c3 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -231,7 +231,13 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ - ("index", "Axelrod.tex", "Axelrod Documentation", "Vincent Knight", "manual") + ( + "index", + "Axelrod.tex", + "Axelrod Documentation", + "Vincent Knight", + "manual", + ) ] # The name of an image file (relative to this directory) to place at the top of @@ -259,7 +265,9 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [("index", "axelrod", "Axelrod Documentation", ["Vincent Knight"], 1)] +man_pages = [ + ("index", "axelrod", "Axelrod Documentation", ["Vincent Knight"], 1) +] # If true, show URL addresses after external links. # man_show_urls = False diff --git a/docs/tutorials/advanced/setting_a_seed.rst b/docs/tutorials/advanced/setting_a_seed.rst index cf3b16b45..119a259a9 100644 --- a/docs/tutorials/advanced/setting_a_seed.rst +++ b/docs/tutorials/advanced/setting_a_seed.rst @@ -53,7 +53,7 @@ To seed a tournament we also pass a seed to the tournament at creation time: >>> tournament = axl.Tournament(players, turns=5, repetitions=5, seed=seed) >>> results = tournament.play(processes=1) >>> tournament2 = axl.Tournament(players, turns=5, repetitions=5, seed=seed) - >>> results2 = tournament.play(processes=1) + >>> results2 = tournament2.play(processes=1) >>> results.ranked_names == results2.ranked_names True @@ -65,7 +65,7 @@ rankings, will be the same. >>> tournament = axl.Tournament(players, turns=5, repetitions=5, seed=201) >>> results = tournament.play(processes=2) >>> tournament2 = axl.Tournament(players, turns=5, repetitions=5, seed=201) - >>> results2 = tournament.play(processes=2) + >>> results2 = tournament2.play(processes=2) >>> results.ranked_names == results2.ranked_names True diff --git a/docs/tutorials/contributing/guidelines.rst b/docs/tutorials/contributing/guidelines.rst index 42fc0923d..3232ad9ac 100644 --- a/docs/tutorials/contributing/guidelines.rst +++ b/docs/tutorials/contributing/guidelines.rst @@ -12,15 +12,18 @@ The project follows the following guidelines: `_ which includes **using descriptive variable names**. 3. Code Format: Use the `Black formatter `_ to format - all code and the `isort utility `_ to - sort import statements. + all code and the `isort utility `_ to + sort import statements. You can run black on all code with:: + + $ python -m black -l 80 . + 4. Commits: Please try to use commit messages that give a meaningful history for anyone using git's log features. Try to use messages that complete the sentence, "This commit will..." There is some excellent guidance on the subject from `Chris Beams `_ 5. Testing: the project uses the `unittest `_ library and has a nice - testing suite that makes some things very easy to write tests for. Please try + testing suite that makes some things easy to write tests for. Please try to increase the test coverage on pull requests. 6. Merging pull-requests: We require two of the (currently three) core-team maintainers to merge. 
Opening a PR for early diff --git a/docs/tutorials/contributing/index.rst b/docs/tutorials/contributing/index.rst index 9460afdbf..6a3555ad7 100644 --- a/docs/tutorials/contributing/index.rst +++ b/docs/tutorials/contributing/index.rst @@ -10,6 +10,7 @@ Contents: :maxdepth: 2 guidelines.rst + setting_up_the_environment.rst strategy/index.rst library/index.rst running_tests.rst diff --git a/docs/tutorials/contributing/running_tests.rst b/docs/tutorials/contributing/running_tests.rst index 3afcde7f6..f3ca6d961 100644 --- a/docs/tutorials/contributing/running_tests.rst +++ b/docs/tutorials/contributing/running_tests.rst @@ -4,10 +4,6 @@ Running tests Basic test runners ------------------ -Before running tests, you should have hypothesis 3.2 installed:: - - $ pip install hypothesis==3.2 - The project has an extensive test suite which is run each time a new contribution is made to the repository. If you want to check that all the tests pass before you submit a pull request you can run the tests yourself:: @@ -73,12 +69,3 @@ You can also run the type checker on a given file. For example, to run the type checker on the Grudger strategy:: $ mypy --ignore-missing-imports --follow-imports skip axelrod/strategies/grudger.py - - -Continuous integration -====================== - -This project is being taken care of by `travis-ci -`_, so all tests will be run automatically when opening -a pull request. You can see the latest build status `here -`_. diff --git a/docs/tutorials/contributing/setting_up_the_environment.rst b/docs/tutorials/contributing/setting_up_the_environment.rst new file mode 100644 index 000000000..2d6be2f86 --- /dev/null +++ b/docs/tutorials/contributing/setting_up_the_environment.rst @@ -0,0 +1,38 @@ +Setting up the environment +========================== + +Installing all dependencies +--------------------------- + +All dependencies can be installed by running:: + + $ pip install -r requirements.txt + +It is recommended to do this using a virtual environment tool of your choice. + +For example, when using the virtual environment library :code:`venv`:: + + $ python -m venv axelrod_development + $ source axelrod_development/bin/activate + $ pip install -r requirements.txt + +The git workflow +---------------- + +There are two important branches in this repository: + +- :code:`dev`: The most up-to-date branch with no failing tests. + This is the default branch on github. +- :code:`release`: The latest release. + +When working on a new contribution, branch from the latest :code:`dev` branch and +open a Pull Request on github from your branch to the :code:`dev` branch. + +The procedure for a new release (this is carried out by one of the core maintainers): + +1. Create a Pull Request from :code:`dev` to :code:`release` which should + include an update to :code:`axelrod/version.py` and :code:`CHANGES.md`. 2. Create a git tag. 3. Push to github. 4. Create a release on github. 5. Push to PyPI: :code:`python setup.py sdist bdist_wheel upload` diff --git a/doctests.py b/doctests.py index 8bc74f50f..cb027d310 100644 --- a/doctests.py +++ b/doctests.py @@ -7,19 +7,19 @@ # Note loader and ignore are required arguments for unittest even if unused. def load_tests(loader, tests, ignore): """ - Locates and returns a collection of unittests in a TestSuite object - Parameters - ---------- - loader : - A required but unused parameter. - tests : - A unittest TestSuite object for collecting the needed test cases. - ignore : - A required but unused parameter. 
- Returns - ------- - tests : - A unittest TestSuite object that holds test cases. + Locates and returns a collection of unittests in a TestSuite object + Parameters + ---------- + loader : + A required but unused parameter. + tests : + A unittest TestSuite object for collecting the needed test cases. + ignore : + A required but unused parameter. + Returns + ------- + tests : + A unittest TestSuite object that holds test cases. """ for root, dirs, files in os.walk("."): for f in files: @@ -27,7 +27,8 @@ def load_tests(loader, tests, ignore): tests.addTests( doctest.DocFileSuite( # ELLIPSIS option tells doctest to ignore portions of the verification value. - os.path.join(root, f), optionflags=doctest.ELLIPSIS + os.path.join(root, f), + optionflags=doctest.ELLIPSIS, ) )
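The `load_tests` hook in `doctests.py` follows unittest's load_tests protocol, so the doctest suite it assembles runs under the standard unittest runner. A minimal sketch of invoking it from the repository root (the explicit discovery pattern and runner choice here are illustrative assumptions, not part of the diff)::

    import unittest

    # Discover doctests.py; unittest then calls its load_tests() hook, which
    # walks the tree for .rst files and wraps each one in a
    # doctest.DocFileSuite with the ELLIPSIS flag enabled.
    suite = unittest.defaultTestLoader.discover(".", pattern="doctests.py")
    unittest.TextTestRunner().run(suite)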