Commit 2732799b by u214892

### removed stale code (flake8 E800)

parent df440c58
Pipeline #922 failed with stage
in 6 minutes and 59 seconds
 ... ... @@ -335,7 +335,6 @@ class GridTransitionMap(TransitionMap): binTrans = self.get_transitions(rcPos) # 16bit integer - all trans in/out lnBinTrans = array([binTrans >> 8, binTrans & 0xff], dtype=np.uint8) # 2 x uint8 g2binTrans = np.unpackbits(lnBinTrans).reshape(4, 4) # 4x4 x uint8 binary(0,1) # gDirIn = g2binTrans.any(axis=1) # inbound directions as boolean array (4) gDirOut = g2binTrans.any(axis=0) # outbound directions as boolean array (4) giDirOut = np.argwhere(gDirOut)[:, 0] # valid outbound directions as array of int ... ...
 ... ... @@ -156,9 +156,8 @@ def a_star(rail_trans, rail_array, start, end): # create the f, g, and h values child.g = current_node.g + 1 # this heuristic favors diagonal paths # child.h = ((child.pos[0] - end_node.pos[0]) ** 2) + \ # ((child.pos[1] - end_node.pos[1]) ** 2) # this heuristic favors diagonal paths: # child.h = ((child.pos[0] - end_node.pos[0]) ** 2) + ((child.pos[1] - end_node.pos[1]) ** 2) \ # noqa: E800 # this heuristic avoids diagonal paths child.h = abs(child.pos[0] - end_node.pos[0]) + abs(child.pos[1] - end_node.pos[1]) child.f = child.g + child.h ... ... @@ -199,7 +198,6 @@ def connect_rail(rail_trans, rail_array, start, end): else: # into existing rail new_trans = rail_trans.set_transition(new_trans, current_dir, new_dir, 1) # new_trans = rail_trans.set_transition(new_trans, mirror(new_dir), mirror(current_dir), 1) else: # set the forward path new_trans = rail_trans.set_transition(new_trans, current_dir, new_dir, 1) ... ... @@ -216,7 +214,6 @@ def connect_rail(rail_trans, rail_array, start, end): else: # into existing rail new_trans_e = rail_trans.set_transition(new_trans_e, new_dir, new_dir, 1) # new_trans_e = rail_trans.set_transition(new_trans_e, mirror(new_dir), mirror(new_dir), 1) rail_array[end_pos] = new_trans_e current_dir = new_dir ... ...
 ... ... @@ -95,7 +95,6 @@ def complex_rail_generator(nr_start_goal=1, nr_extra=100, min_dist=20, max_dist= for j in range(2): dist = distance_on_rail(sg_new[i], sg[j]) if dist < 2: # print("too close:", dist, sg_new[i], sg[j]) return False return True ... ... @@ -105,18 +104,15 @@ def complex_rail_generator(nr_start_goal=1, nr_extra=100, min_dist=20, max_dist= if not all_ok: # we can might as well give up at this point # print("\n> Complex Rail Gen: Sanity counter reached, giving up!") break new_path = connect_rail(rail_trans, rail_array, start, goal) if len(new_path) >= 2: nr_created += 1 # print(":::: path: ", new_path) start_goal.append([start, goal]) start_dir.append(mirror(get_direction(new_path[0], new_path[1]))) else: # after too many failures we will give up # print("failed...") created_sanity += 1 # add extra connections between existing rail ... ... @@ -139,9 +135,6 @@ def complex_rail_generator(nr_start_goal=1, nr_extra=100, min_dist=20, max_dist= if len(new_path) >= 2: nr_created += 1 # print("\n> Complex Rail Gen: Created #", len(start_goal), "pairs and #", nr_created, "extra connections") # print(start_goal) agents_position = [sg[0] for sg in start_goal[:num_agents]] agents_target = [sg[1] for sg in start_goal[:num_agents]] agents_direction = start_dir[:num_agents] ... ... @@ -351,7 +344,6 @@ def random_rail_generator(cell_type_relative_proportion=[1.0] * 11): num_insertions = 0 while num_insertions < MAX_INSERTIONS and len(cells_to_fill) > 0: # cell = random.sample(cells_to_fill, 1)[0] cell = cells_to_fill[np.random.choice(len(cells_to_fill), 1)[0]] cells_to_fill.remove(cell) row = cell[0] ... ...
 ... ... @@ -37,14 +37,9 @@ class TreeObsForRailEnv(ObservationBuilder): 4)) self.max_dist = np.zeros(nAgents) # for i in range(nAgents): # self.max_dist[i] = self._distance_map_walker(self.env.agents_target[i], i) self.max_dist = [self._distance_map_walker(agent.target, i) for i, agent in enumerate(agents)] # Update local lookup table for all agents' target locations self.location_has_target = {} # for loc in self.env.agents_target: # self.location_has_target[(loc[0], loc[1])] = 1 self.location_has_target = {tuple(agent.target): 1 for agent in agents} def _distance_map_walker(self, position, target_nr): ... ... @@ -57,7 +52,6 @@ class TreeObsForRailEnv(ObservationBuilder): self.distance_map[target_nr, position[0], position[1], :] = 0 # Fill in the (up to) 4 neighboring nodes # nodes_queue = [] # list of tuples (row, col, direction, distance); # direction is the direction of movement, meaning that at least a possible orientation of an agent # in cell (row,col) allows a movement in direction `direction' nodes_queue = deque(self._get_and_update_neighbors(position, target_nr, 0, enforce_target_direction=-1)) ... ... @@ -200,9 +194,6 @@ class TreeObsForRailEnv(ObservationBuilder): """ # Update local lookup table for all agents' positions # self.location_has_agent = {} # for loc in self.env.agents_position: # self.location_has_agent[(loc[0], loc[1])] = 1 self.location_has_agent = {tuple(agent.position): 1 for agent in self.env.agents} if handle > len(self.env.agents): print("ERROR: obs _get - handle ", handle, " len(agents)", len(self.env.agents)) ... ... @@ -259,8 +250,6 @@ class TreeObsForRailEnv(ObservationBuilder): visited = set() # other_agent_encountered = False # other_target_encountered = False other_agent_encountered = np.inf other_target_encountered = np.inf ... ... @@ -271,12 +260,10 @@ class TreeObsForRailEnv(ObservationBuilder): # Modify here to compute any useful data required to build the end node's features. 
This code is called # for each cell visited between the previous branching node and the next switch / target / dead-end. if position in self.location_has_agent: # other_agent_encountered = True if num_steps < other_agent_encountered: other_agent_encountered = num_steps if position in self.location_has_target: # other_target_encountered = True if num_steps < other_target_encountered: other_target_encountered = num_steps # ############################# ... ... @@ -519,12 +506,6 @@ class GlobalObsForRailEnv(ObservationBuilder): bitlist = [int(digit) for digit in bin(self.env.rail.get_transitions((i, j)))[2:]] bitlist = [0] * (16 - len(bitlist)) + bitlist self.rail_obs[i, j] = np.array(bitlist) # self.rail_obs[i, j] = np.array( # list(f'{self.env.rail.get_transitions((i, j)):016b}')).astype(int) # self.targets = np.zeros(self.env.height, self.env.width) # for target_pos in self.env.agents_target: # self.targets[target_pos] += 1 def get(self, handle): obs_targets = np.zeros((self.env.height, self.env.width, 2)) ... ... @@ -583,12 +564,6 @@ class GlobalObsForRailEnvDirectionDependent(ObservationBuilder): bitlist = [int(digit) for digit in bin(self.env.rail.get_transitions((i, j)))[2:]] bitlist = [0] * (16 - len(bitlist)) + bitlist self.rail_obs[i, j] = np.array(bitlist) # self.rail_obs[i, j] = np.array( # list(f'{self.env.rail.get_transitions((i, j)):016b}')).astype(int) # self.targets = np.zeros(self.env.height, self.env.width) # for target_pos in self.env.agents_target: # self.targets[target_pos] += 1 def get(self, handle): obs_targets = np.zeros((self.env.height, self.env.width, 2)) ... ... 
@@ -667,11 +642,6 @@ class LocalObsForRailEnv(ObservationBuilder): agents = self.env.agents agent = agents[handle] # left_offset = max(0, agent.position[1] - 1 - self.view_radius) # right_offset = min(self.env.width, agent.position[1] + 1 + self.view_radius) # top_offset = max(0, agent.position[0] - 1 - self.view_radius) # bottom_offset = min(0, agent.position[0] + 1 + self.view_radius) local_rail_obs = self.rail_obs[agent.position[0]: agent.position[0] + 2 * self.view_radius + 1, agent.position[1]:agent.position[1] + 2 * self.view_radius + 1] ... ...
 ... ... @@ -96,9 +96,6 @@ class RailEnv(Environment): self.width = width self.height = height # use get_num_agents() instead # self.number_of_agents = number_of_agents self.obs_builder = obs_builder_object self.obs_builder._set_env(self) ... ... @@ -118,11 +115,7 @@ class RailEnv(Environment): self.obs_dict = {} self.rewards_dict = {} self.dev_obs_dict = {} # self.agents_handles = list(range(self.number_of_agents)) # self.agents_position = [] # self.agents_target = [] # self.agents_direction = [] self.agents = [None] * number_of_agents # live agents self.agents_static = [None] * number_of_agents # static agent information self.num_resets = 0 ... ... @@ -166,16 +159,12 @@ class RailEnv(Environment): if replace_agents: self.agents_static = EnvAgentStatic.from_lists(*tRailAgents[1:4]) # Take the agent static info and put (live) agents at the start positions # self.agents = EnvAgent.list_from_static(self.agents_static[:len(self.agents_handles)]) self.restart_agents() self.num_resets += 1 # for handle in self.agents_handles: # self.dones[handle] = False # TODO perhaps dones should be part of each agent. self.dones = dict.fromkeys(list(range(self.get_num_agents())) + ["__all__"], False) # perhaps dones should be part of each agent. # Reset the state of the observation builder with the new environment self.obs_builder.reset() ... ... @@ -196,8 +185,6 @@ class RailEnv(Environment): # Reset the step rewards self.rewards_dict = dict() # for handle in self.agents_handles: # self.rewards_dict[handle] = 0 for iAgent in range(self.get_num_agents()): self.rewards_dict[iAgent] = 0 ... ... @@ -207,8 +194,6 @@ class RailEnv(Environment): # for i in range(len(self.agents_handles)): for iAgent in range(self.get_num_agents()): # handle = self.agents_handles[i] transition_isValid = None agent = self.agents[iAgent] if iAgent not in action_dict: # no action has been supplied for this agent ... ... 
@@ -219,8 +204,6 @@ class RailEnv(Environment): action_dict[iAgent] = RailEnvActions.DO_NOTHING if self.dones[iAgent]: # this agent has already completed... # print("rail_env.py @", currentframe().f_back.f_lineno, " agent ", iAgent, # "has already completed : why action will not be executed!!!!? ADRIAN") continue action = action_dict[iAgent] ... ... @@ -275,22 +258,12 @@ class RailEnv(Environment): # the action was not valid, add penalty self.rewards_dict[iAgent] += invalid_action_penalty # if agent is not in target position, add step penalty # if self.agents_position[i][0] == self.agents_target[i][0] and \ # self.agents_position[i][1] == self.agents_target[i][1]: # self.dones[handle] = True if np.equal(agent.position, agent.target).all(): self.dones[iAgent] = True else: self.rewards_dict[iAgent] += step_penalty # Check for end of episode + add global reward to all rewards! # num_agents_in_target_position = 0 # for i in range(self.number_of_agents): # if self.agents_position[i][0] == self.agents_target[i][0] and \ # self.agents_position[i][1] == self.agents_target[i][1]: # num_agents_in_target_position += 1 # if num_agents_in_target_position == self.number_of_agents: if np.all([np.array_equal(agent2.position, agent2.target) for agent2 in self.agents]): self.dones["__all__"] = True self.rewards_dict = [0 * r + global_reward for r in self.rewards_dict] ... ... @@ -301,8 +274,6 @@ class RailEnv(Environment): return self._get_observations(), self.rewards_dict, self.dones, {} def _check_action_on_agent(self, action, agent): # pos = agent.position # self.agents_position[i] # direction = agent.direction # self.agents_direction[i] # compute number of possible transitions in the current # cell used to check for invalid actions new_direction, transition_isValid = self.check_action(agent, action) ... ... 
@@ -311,13 +282,6 @@ class RailEnv(Environment): # 1) transition allows the new_direction in the cell, # 2) the new cell is not empty (case 0), # 3) the cell is free, i.e., no agent is currently in that cell # if ( # new_position[1] >= self.width or # new_position[0] >= self.height or # new_position[0] < 0 or new_position[1] < 0): # new_cell_isValid = False # if self.rail.get_transitions(new_position) == 0: # new_cell_isValid = False new_cell_isValid = ( np.array_equal( # Check the new position is still in the grid new_position, ... ... @@ -329,11 +293,6 @@ class RailEnv(Environment): transition_isValid = self.rail.get_transition( (*agent.position, agent.direction), new_direction) # cell_isFree = True # for j in range(self.number_of_agents): # if self.agents_position[j] == new_position: # cell_isFree = False # break # Check the new position is not the same as any of the existing agent positions # (including itself, for simplicity, since it is moving) cell_isFree = not np.any( ... ... @@ -351,7 +310,6 @@ class RailEnv(Environment): num_transitions = np.count_nonzero(possible_transitions) new_direction = agent.direction # print(nbits,np.sum(possible_transitions)) if action == RailEnvActions.MOVE_LEFT: new_direction = agent.direction - 1 if num_transitions <= 1: ... ... @@ -376,7 +334,6 @@ class RailEnv(Environment): def _get_observations(self): self.obs_dict = {} self.debug_obs_dict = {} # for handle in self.agents_handles: for iAgent in range(self.get_num_agents()): self.obs_dict[iAgent] = self.obs_builder.get(iAgent) return self.obs_dict ... ... @@ -421,7 +378,6 @@ class RailEnv(Environment): self.height, self.width = self.rail.grid.shape self.rail.height = self.height self.rail.width = self.width # self.agents = [None] * self.get_num_agents() self.dones = dict.fromkeys(list(range(self.get_num_agents())) + ["__all__"], False) def save(self, filename): ... ...