Commit 2732799b authored by u214892's avatar u214892
Browse files

50 removed stale code (flake8 E800)

parent df440c58
Pipeline #922 failed with stage
in 6 minutes and 59 seconds
......@@ -335,7 +335,6 @@ class GridTransitionMap(TransitionMap):
binTrans = self.get_transitions(rcPos) # 16bit integer - all trans in/out
lnBinTrans = array([binTrans >> 8, binTrans & 0xff], dtype=np.uint8) # 2 x uint8
g2binTrans = np.unpackbits(lnBinTrans).reshape(4, 4) # 4x4 x uint8 binary(0,1)
# gDirIn = g2binTrans.any(axis=1) # inbound directions as boolean array (4)
gDirOut = g2binTrans.any(axis=0) # outbound directions as boolean array (4)
giDirOut = np.argwhere(gDirOut)[:, 0] # valid outbound directions as array of int
......
......@@ -156,9 +156,8 @@ def a_star(rail_trans, rail_array, start, end):
# create the f, g, and h values
child.g = current_node.g + 1
# this heuristic favors diagonal paths
# child.h = ((child.pos[0] - end_node.pos[0]) ** 2) + \
# ((child.pos[1] - end_node.pos[1]) ** 2)
# this heuristic favors diagonal paths:
# child.h = ((child.pos[0] - end_node.pos[0]) ** 2) + ((child.pos[1] - end_node.pos[1]) ** 2) \ # noqa: E800
# this heuristic avoids diagonal paths
child.h = abs(child.pos[0] - end_node.pos[0]) + abs(child.pos[1] - end_node.pos[1])
child.f = child.g + child.h
......@@ -199,7 +198,6 @@ def connect_rail(rail_trans, rail_array, start, end):
else:
# into existing rail
new_trans = rail_trans.set_transition(new_trans, current_dir, new_dir, 1)
# new_trans = rail_trans.set_transition(new_trans, mirror(new_dir), mirror(current_dir), 1)
else:
# set the forward path
new_trans = rail_trans.set_transition(new_trans, current_dir, new_dir, 1)
......@@ -216,7 +214,6 @@ def connect_rail(rail_trans, rail_array, start, end):
else:
# into existing rail
new_trans_e = rail_trans.set_transition(new_trans_e, new_dir, new_dir, 1)
# new_trans_e = rail_trans.set_transition(new_trans_e, mirror(new_dir), mirror(new_dir), 1)
rail_array[end_pos] = new_trans_e
current_dir = new_dir
......
......@@ -95,7 +95,6 @@ def complex_rail_generator(nr_start_goal=1, nr_extra=100, min_dist=20, max_dist=
for j in range(2):
dist = distance_on_rail(sg_new[i], sg[j])
if dist < 2:
# print("too close:", dist, sg_new[i], sg[j])
return False
return True
......@@ -105,18 +104,15 @@ def complex_rail_generator(nr_start_goal=1, nr_extra=100, min_dist=20, max_dist=
if not all_ok:
# we might as well give up at this point
# print("\n> Complex Rail Gen: Sanity counter reached, giving up!")
break
new_path = connect_rail(rail_trans, rail_array, start, goal)
if len(new_path) >= 2:
nr_created += 1
# print(":::: path: ", new_path)
start_goal.append([start, goal])
start_dir.append(mirror(get_direction(new_path[0], new_path[1])))
else:
# after too many failures we will give up
# print("failed...")
created_sanity += 1
# add extra connections between existing rail
......@@ -139,9 +135,6 @@ def complex_rail_generator(nr_start_goal=1, nr_extra=100, min_dist=20, max_dist=
if len(new_path) >= 2:
nr_created += 1
# print("\n> Complex Rail Gen: Created #", len(start_goal), "pairs and #", nr_created, "extra connections")
# print(start_goal)
agents_position = [sg[0] for sg in start_goal[:num_agents]]
agents_target = [sg[1] for sg in start_goal[:num_agents]]
agents_direction = start_dir[:num_agents]
......@@ -351,7 +344,6 @@ def random_rail_generator(cell_type_relative_proportion=[1.0] * 11):
num_insertions = 0
while num_insertions < MAX_INSERTIONS and len(cells_to_fill) > 0:
# cell = random.sample(cells_to_fill, 1)[0]
cell = cells_to_fill[np.random.choice(len(cells_to_fill), 1)[0]]
cells_to_fill.remove(cell)
row = cell[0]
......
......@@ -37,14 +37,9 @@ class TreeObsForRailEnv(ObservationBuilder):
4))
self.max_dist = np.zeros(nAgents)
# for i in range(nAgents):
# self.max_dist[i] = self._distance_map_walker(self.env.agents_target[i], i)
self.max_dist = [self._distance_map_walker(agent.target, i) for i, agent in enumerate(agents)]
# Update local lookup table for all agents' target locations
self.location_has_target = {}
# for loc in self.env.agents_target:
# self.location_has_target[(loc[0], loc[1])] = 1
self.location_has_target = {tuple(agent.target): 1 for agent in agents}
def _distance_map_walker(self, position, target_nr):
......@@ -57,7 +52,6 @@ class TreeObsForRailEnv(ObservationBuilder):
self.distance_map[target_nr, position[0], position[1], :] = 0
# Fill in the (up to) 4 neighboring nodes
# nodes_queue = [] # list of tuples (row, col, direction, distance);
# direction is the direction of movement, meaning that at least a possible orientation of an agent
# in cell (row,col) allows a movement in direction `direction'
nodes_queue = deque(self._get_and_update_neighbors(position, target_nr, 0, enforce_target_direction=-1))
......@@ -200,9 +194,6 @@ class TreeObsForRailEnv(ObservationBuilder):
"""
# Update local lookup table for all agents' positions
# self.location_has_agent = {}
# for loc in self.env.agents_position:
# self.location_has_agent[(loc[0], loc[1])] = 1
self.location_has_agent = {tuple(agent.position): 1 for agent in self.env.agents}
if handle > len(self.env.agents):
print("ERROR: obs _get - handle ", handle, " len(agents)", len(self.env.agents))
......@@ -259,8 +250,6 @@ class TreeObsForRailEnv(ObservationBuilder):
visited = set()
# other_agent_encountered = False
# other_target_encountered = False
other_agent_encountered = np.inf
other_target_encountered = np.inf
......@@ -271,12 +260,10 @@ class TreeObsForRailEnv(ObservationBuilder):
# Modify here to compute any useful data required to build the end node's features. This code is called
# for each cell visited between the previous branching node and the next switch / target / dead-end.
if position in self.location_has_agent:
# other_agent_encountered = True
if num_steps < other_agent_encountered:
other_agent_encountered = num_steps
if position in self.location_has_target:
# other_target_encountered = True
if num_steps < other_target_encountered:
other_target_encountered = num_steps
# #############################
......@@ -519,12 +506,6 @@ class GlobalObsForRailEnv(ObservationBuilder):
bitlist = [int(digit) for digit in bin(self.env.rail.get_transitions((i, j)))[2:]]
bitlist = [0] * (16 - len(bitlist)) + bitlist
self.rail_obs[i, j] = np.array(bitlist)
# self.rail_obs[i, j] = np.array(
# list(f'{self.env.rail.get_transitions((i, j)):016b}')).astype(int)
# self.targets = np.zeros(self.env.height, self.env.width)
# for target_pos in self.env.agents_target:
# self.targets[target_pos] += 1
def get(self, handle):
obs_targets = np.zeros((self.env.height, self.env.width, 2))
......@@ -583,12 +564,6 @@ class GlobalObsForRailEnvDirectionDependent(ObservationBuilder):
bitlist = [int(digit) for digit in bin(self.env.rail.get_transitions((i, j)))[2:]]
bitlist = [0] * (16 - len(bitlist)) + bitlist
self.rail_obs[i, j] = np.array(bitlist)
# self.rail_obs[i, j] = np.array(
# list(f'{self.env.rail.get_transitions((i, j)):016b}')).astype(int)
# self.targets = np.zeros(self.env.height, self.env.width)
# for target_pos in self.env.agents_target:
# self.targets[target_pos] += 1
def get(self, handle):
obs_targets = np.zeros((self.env.height, self.env.width, 2))
......@@ -667,11 +642,6 @@ class LocalObsForRailEnv(ObservationBuilder):
agents = self.env.agents
agent = agents[handle]
# left_offset = max(0, agent.position[1] - 1 - self.view_radius)
# right_offset = min(self.env.width, agent.position[1] + 1 + self.view_radius)
# top_offset = max(0, agent.position[0] - 1 - self.view_radius)
# bottom_offset = min(0, agent.position[0] + 1 + self.view_radius)
local_rail_obs = self.rail_obs[agent.position[0]: agent.position[0] + 2 * self.view_radius + 1,
agent.position[1]:agent.position[1] + 2 * self.view_radius + 1]
......
......@@ -96,9 +96,6 @@ class RailEnv(Environment):
self.width = width
self.height = height
# use get_num_agents() instead
# self.number_of_agents = number_of_agents
self.obs_builder = obs_builder_object
self.obs_builder._set_env(self)
......@@ -118,11 +115,7 @@ class RailEnv(Environment):
self.obs_dict = {}
self.rewards_dict = {}
self.dev_obs_dict = {}
# self.agents_handles = list(range(self.number_of_agents))
# self.agents_position = []
# self.agents_target = []
# self.agents_direction = []
self.agents = [None] * number_of_agents # live agents
self.agents_static = [None] * number_of_agents # static agent information
self.num_resets = 0
......@@ -166,16 +159,12 @@ class RailEnv(Environment):
if replace_agents:
self.agents_static = EnvAgentStatic.from_lists(*tRailAgents[1:4])
# Take the agent static info and put (live) agents at the start positions
# self.agents = EnvAgent.list_from_static(self.agents_static[:len(self.agents_handles)])
self.restart_agents()
self.num_resets += 1
# for handle in self.agents_handles:
# self.dones[handle] = False
# TODO perhaps dones should be part of each agent.
self.dones = dict.fromkeys(list(range(self.get_num_agents())) + ["__all__"], False)
# perhaps dones should be part of each agent.
# Reset the state of the observation builder with the new environment
self.obs_builder.reset()
......@@ -196,8 +185,6 @@ class RailEnv(Environment):
# Reset the step rewards
self.rewards_dict = dict()
# for handle in self.agents_handles:
# self.rewards_dict[handle] = 0
for iAgent in range(self.get_num_agents()):
self.rewards_dict[iAgent] = 0
......@@ -207,8 +194,6 @@ class RailEnv(Environment):
# for i in range(len(self.agents_handles)):
for iAgent in range(self.get_num_agents()):
# handle = self.agents_handles[i]
transition_isValid = None
agent = self.agents[iAgent]
if iAgent not in action_dict: # no action has been supplied for this agent
......@@ -219,8 +204,6 @@ class RailEnv(Environment):
action_dict[iAgent] = RailEnvActions.DO_NOTHING
if self.dones[iAgent]: # this agent has already completed...
# print("rail_env.py @", currentframe().f_back.f_lineno, " agent ", iAgent,
# "has already completed : why action will not be executed!!!!? ADRIAN")
continue
action = action_dict[iAgent]
......@@ -275,22 +258,12 @@ class RailEnv(Environment):
# the action was not valid, add penalty
self.rewards_dict[iAgent] += invalid_action_penalty
# if agent is not in target position, add step penalty
# if self.agents_position[i][0] == self.agents_target[i][0] and \
# self.agents_position[i][1] == self.agents_target[i][1]:
# self.dones[handle] = True
if np.equal(agent.position, agent.target).all():
self.dones[iAgent] = True
else:
self.rewards_dict[iAgent] += step_penalty
# Check for end of episode + add global reward to all rewards!
# num_agents_in_target_position = 0
# for i in range(self.number_of_agents):
# if self.agents_position[i][0] == self.agents_target[i][0] and \
# self.agents_position[i][1] == self.agents_target[i][1]:
# num_agents_in_target_position += 1
# if num_agents_in_target_position == self.number_of_agents:
if np.all([np.array_equal(agent2.position, agent2.target) for agent2 in self.agents]):
self.dones["__all__"] = True
self.rewards_dict = [0 * r + global_reward for r in self.rewards_dict]
......@@ -301,8 +274,6 @@ class RailEnv(Environment):
return self._get_observations(), self.rewards_dict, self.dones, {}
def _check_action_on_agent(self, action, agent):
# pos = agent.position # self.agents_position[i]
# direction = agent.direction # self.agents_direction[i]
# compute number of possible transitions in the current
# cell used to check for invalid actions
new_direction, transition_isValid = self.check_action(agent, action)
......@@ -311,13 +282,6 @@ class RailEnv(Environment):
# 1) transition allows the new_direction in the cell,
# 2) the new cell is not empty (case 0),
# 3) the cell is free, i.e., no agent is currently in that cell
# if (
# new_position[1] >= self.width or
# new_position[0] >= self.height or
# new_position[0] < 0 or new_position[1] < 0):
# new_cell_isValid = False
# if self.rail.get_transitions(new_position) == 0:
# new_cell_isValid = False
new_cell_isValid = (
np.array_equal( # Check the new position is still in the grid
new_position,
......@@ -329,11 +293,6 @@ class RailEnv(Environment):
transition_isValid = self.rail.get_transition(
(*agent.position, agent.direction),
new_direction)
# cell_isFree = True
# for j in range(self.number_of_agents):
# if self.agents_position[j] == new_position:
# cell_isFree = False
# break
# Check the new position is not the same as any of the existing agent positions
# (including itself, for simplicity, since it is moving)
cell_isFree = not np.any(
......@@ -351,7 +310,6 @@ class RailEnv(Environment):
num_transitions = np.count_nonzero(possible_transitions)
new_direction = agent.direction
# print(nbits,np.sum(possible_transitions))
if action == RailEnvActions.MOVE_LEFT:
new_direction = agent.direction - 1
if num_transitions <= 1:
......@@ -376,7 +334,6 @@ class RailEnv(Environment):
def _get_observations(self):
    """Build and cache one observation per agent.

    Returns a dict mapping each agent handle (0..num_agents-1) to the
    observation produced by the configured observation builder, and
    stores it in ``self.obs_dict``.
    """
    self.debug_obs_dict = {}
    self.obs_dict = {
        handle: self.obs_builder.get(handle)
        for handle in range(self.get_num_agents())
    }
    return self.obs_dict
......@@ -421,7 +378,6 @@ class RailEnv(Environment):
self.height, self.width = self.rail.grid.shape
self.rail.height = self.height
self.rail.width = self.width
# self.agents = [None] * self.get_num_agents()
self.dones = dict.fromkeys(list(range(self.get_num_agents())) + ["__all__"], False)
def save(self, filename):
......
......@@ -67,9 +67,6 @@ class View(object):
self.nPixCell = self.oRT.gl.nPixCell
def init_widgets(self):
# self.wDrawMode = RadioButtons(options=["Draw", "Erase", "Origin", "Destination"])
# self.wDrawMode.observe(self.editor.setDrawMode, names="value")
# Debug checkbox - enable logging in the Output widget
self.wDebug = ipywidgets.Checkbox(description="Debug")
self.wDebug.observe(self.controller.setDebug, names="value")
......@@ -111,11 +108,6 @@ class View(object):
tab_contents = ["Regen", "Observation"]
for i, title in enumerate(tab_contents):
self.wTab.set_title(i, title)
# self.wTab.children = [
# VBox([self.wRegenSizeWidth, self.wRegenSizeHeight, self.wRegenNAgents,
# self.wRegenMethod, self.wReplaceAgents]),
# VBox([self.wDebug, self.wDebug_move, self.wShowObs]),
# ]
self.wTab.children = [
VBox([self.wRegenSizeWidth, self.wRegenSizeHeight, self.wRegenNAgents]),
VBox([self.wShowObs]),
......@@ -147,11 +139,8 @@ class View(object):
self.lwButtons.append(wButton)
self.wVbox_controls = VBox([
self.wFilename, # self.wDrawMode,
self.wFilename,
*self.lwButtons,
# self.wRegenSize,
# self.wRegenNAgents,
# self.wProg_steps,
self.wTab])
self.wMain = HBox([self.wImage, self.wVbox_controls])
......@@ -166,7 +155,6 @@ class View(object):
def redraw(self):
with self.wOutput:
# plt.figure(figsize=(10, 10))
self.oRT.set_new_rail()
self.model.env.agents = self.model.env.agents_static
......@@ -180,8 +168,6 @@ class View(object):
show=False, iSelectedAgent=self.model.iSelectedAgent,
show_observations=self.show_observations())
img = self.oRT.getImage()
# plt.clf()
# plt.close()
self.wImage.data = img
self.writableData = np.copy(self.wImage.data)
......@@ -283,8 +269,6 @@ class Controller(object):
if self.model.bDebug and (event["buttons"] > 0 or self.model.bDebug_move):
self.debug("debug:", len(qEvents), event)
# assert wid == self.wid_img, "wid not same as wid_img"
# If the mouse is held down, enqueue an event in our own queue
# The intention was to avoid too many redraws.
if event["buttons"] > 0:
......@@ -297,29 +281,15 @@ class Controller(object):
if len(qEvents) > 0:
tNow = time.time()
if tNow - qEvents[0][0] > 0.1: # wait before trying to draw
# height, width = wid.data.shape[:2]
# writableData = np.copy(self.wid_img.data) # writable copy of image - wid_img.data is somehow readonly
# with self.wid_img.hold_sync():
while len(qEvents) > 0:
t, x, y = qEvents.popleft() # get events from our queue
self.view.drag_path_element(x, y)
# Translate and scale from x,y to integer row,col (note order change)
# rcCell = ((array([y, x]) - self.yxBase) / self.nPixCell).astype(int)
rcCell = self.view.xy_to_rc(x, y)
self.editor.drag_path_element(rcCell)
# Store the row,col location of the click, if we have entered a new cell
# if len(lrcStroke) > 0:
# rcLast = lrcStroke[-1]
# if not np.array_equal(rcLast, rcCell): # only save at transition
# # print(y, x, rcCell)
# lrcStroke.append(rcCell)
# else:
# # This is the first cell in a mouse stroke
# lrcStroke.append(rcCell)
self.view.redisplayImage()
else:
......@@ -418,9 +388,6 @@ class EditorModel(object):
set a new env for the editor, used by load and regenerate.
"""
self.env = env
# self.yxBase = array([6, 21]) # pixel offset
# self.nPixCell = 700 / self.env.rail.width # 35
# self.oRT = rt.RenderTool(env)
def setDebug(self, bDebug):
    # Store the debug flag; the controller reads model.bDebug to gate
    # its debug output (see the mouse-event handler above).
    self.bDebug = bDebug
......@@ -538,10 +505,6 @@ class EditorModel(object):
rcMiddle = rc3Cells[1] # the middle cell which we will update
bDeadend = np.all(lrcStroke[0] == lrcStroke[2]) # deadend means cell 0 == cell 2
# Save the original state of the cell
# oTransrcMiddle = self.env.rail.get_transitions(rcMiddle)
# sTransrcMiddle = self.env.rail.cell_repr(rcMiddle)
# get the 2 row, col deltas between the 3 cells, eg [[-1,0],[0,1]] = North, East
rc2Trans = np.diff(rc3Cells, axis=0)
......@@ -574,12 +537,6 @@ class EditorModel(object):
self.env.rail.set_transition((*rcMiddle, mirror(liTrans[1])),
mirror(liTrans[0]), bAddRemove, remove_deadends=not bDeadend)
# bValid = self.env.rail.is_cell_valid(rcMiddle)
# if not bValid:
# # Reset cell transition values
# self.env.rail.grid[tuple(rcMiddle)] = oTransrcMiddle
# self.log(rcMiddle, "Orig:", sTransrcMiddle, "Mod:", self.env.rail.cell_repr(rcMiddle))
if bPop:
lrcStroke.pop(0) # remove the first cell in the stroke
......@@ -623,10 +580,8 @@ class EditorModel(object):
def clear(self):
    # Reset the editor to an empty environment: blank out the rail grid,
    # drop all (live and static) agents, discard any player, and redraw.
    self.env.rail.grid[:, :] = 0
    # self.env.number_of_agents = 0
    self.env.agents = []
    self.env.agents_static = []
    # self.env.agents_handles = []
    self.player = None
    self.redraw()
......@@ -637,8 +592,6 @@ class EditorModel(object):
self.redraw()
def reset(self, replace_agents=False, nAgents=0):
    # Regenerate the environment with a fresh "complex" rail layout and
    # nAgents agents, then refresh the display.
    # NOTE(review): replace_agents is unused in this body — confirm whether
    # it is still needed by callers or can be deprecated.
    # if replace_agents:
    #     self.env.agents_handles = range(nAgents)
    self.regenerate("complex", nAgents=nAgents)
    self.redraw()
......@@ -648,13 +601,11 @@ class EditorModel(object):
self.redraw()
def setFilename(self, filename):
    # Remember the environment file path used by load() and save().
    # self.log("filename = ", filename, type(filename))
    self.env_filename = filename
def load(self):
if os.path.exists(self.env_filename):
self.log("load file: ", self.env_filename)
# self.env.rail.load_transition_map(self.env_filename, override_gridsize=True)
self.env.load(self.env_filename)
if not self.regen_size_height == self.env.height or not self.regen_size_width == self.env.width:
self.regen_size_height = self.env.height
......@@ -673,7 +624,6 @@ class EditorModel(object):
def save(self):
self.log("save to ", self.env_filename, " working dir: ", os.getcwd())
# self.env.rail.save_transition_map(self.env_filename)
temp_store = self.env.agents
# clear agents before save, because we want the "init" position of the agent to be exported
self.env.agents = []
......@@ -697,7 +647,6 @@ class EditorModel(object):
self.env = RailEnv(width=self.regen_size_width,
height=self.regen_size_height,
rail_generator=fnMethod,
# number_of_agents=self.env.get_num_agents(),
number_of_agents=nAgents,
obs_builder_object=TreeObsForRailEnv(max_depth=2))
else:
......@@ -707,7 +656,6 @@ class EditorModel(object):
self.set_env(self.env)
self.player = None
self.view.new_env()
# self.view.init_canvas() # Can't do init_canvas - need to keep the same canvas widget!
self.redraw()
def setRegenSizeWidth(self, size):
......@@ -777,7 +725,6 @@ class EditorModel(object):
def bg_updater(self, wProg_steps):
try:
for i in range(20):
# self.log("step ", i)
self.step()
time.sleep(0.2)
wProg_steps.value = i + 1 # indicate progress on bar
......
......@@ -36,7 +36,6 @@ class PILGL(GraphicsLayer):
self.yxBase = (0, 0)
self.linewidth = 4
self.nAgentColors = 1 # overridden in loadAgent
# self.tile_size = self.nPixCell
self.width = width
self.height = height
......@@ -65,7 +64,6 @@ class PILGL(GraphicsLayer):
self.draws = []
self.tColBg = (255, 255, 255) # white background
# self.tColBg = (220, 120, 40) # background color
self.tColRail = (0, 0, 0) # black rails
self.tColGrid = (230,) * 3 # light grey for grid
......@@ -76,10 +74,8 @@ class PILGL(GraphicsLayer):
self.nAgentColors = len(self.ltAgentColors)
self.window_open = False
# self.bShow = show
self.firstFrame = True
self.create_layers()
# self.beginFrame()
def rgb_s2i(self, sRGB):
""" convert a hex RGB string like 0091ea to 3-tuple of ints """
......@@ -106,12 +102,10 @@ class PILGL(GraphicsLayer):
self.draws[layer].rectangle([(x - r, y - r), (x + r, y + r)], fill=color, outline=color)
def drawImageXY(self, pil_img, xyPixLeftTop, layer=0):
# self.layers[layer].alpha_composite(pil_img, offset=xyPixLeftTop)
if (pil_img.mode == "RGBA"):
pil_mask = pil_img
else:
pil_mask = None
# print(pil_img, pil_img.mode, xyPixLeftTop, layer)
self.layers[layer].paste(pil_img, xyPixLeftTop, pil_mask)
......@@ -124,7 +118,6 @@ class PILGL(GraphicsLayer):
self.window = tk.Tk()
self.window.title("Flatland")
self.window.configure(background='grey')
# self.window.geometry('%dx%d+%d+%d' % (self.widthPx, self.heightPx, self.xPx, self.yPx))
self.window_open = True
def close_window(self):
......@@ -167,7 +160,6 @@ class PILGL(GraphicsLayer):
def pause(self, seconds=0.00001):
    # Intentionally a no-op in this PIL-based backend; the previous
    # matplotlib implementation is kept below for reference.
    pass
    # plt.pause(seconds)
def alpha_composite_layers(self):
img = self.layers[0]
......@@ -225,10 +217,6 @@ class PILSVG(PILGL):
oSuper = super()
oSuper.__init__(width, height, jupyter)
# self.track = self.track = Track()
# self.lwTrack = []
# self.zug = Zug()
self.lwAgents = []
self.agents_prev = []
......@@ -239,7 +227,6 @@ class PILSVG(PILGL):
return False
def processEvents(self):
    # Stand-in for a GUI event pump: just yield the CPU briefly.
    # self.app.processEvents()
    time.sleep(0.001)
def clear_rails(self):
......@@ -247,7 +234,6 @@ class PILSVG(PILGL):
self.clear_agents()
def clear_agents(self):
# print("Clear Agents: ", len(self.lwAgents))
for wAgent in self.lwAgents:
self.layout.removeWidget(wAgent)
self.lwAgents = []
......@@ -268,7 +254,6 @@ class PILSVG(PILGL):
with io.BytesIO(bytesPNG) as fIn:
pil_img = Image.open(fIn)
pil_img.load()
# print(pil_img.mode)
return pil_img
......@@ -344,12 +329,6 @@ class PILSVG(PILGL):
lTrans16[iTrans] = "1"
sTrans16 = "".join(lTrans16)
binTrans = int(sTrans16, 2)
# print(sTrans, sTrans16, sFile)
# Merge the transition svg image with the background colour.
# This is a shortcut / hack and will need re-working.
# if binTrans > 0:
# svg = svg.merge(svgBG)
pilRail = self.pilFromSvgFile(sPathSvg)
......
......@@ -22,12 +22,6 @@ class RenderTool(object):
xyHalf = array([nPixHalf, -nPixHalf])
grc2xy = array([[0, -nPixCell], [nPixCell, 0]])
gGrid = array(np.meshgrid(np.arange(10), -np.arange(10))) * array([[[nPixCell]], [[nPixCell]]])
# xyPixHalf = xr.DataArray([nPixHalf, -nPixHalf],
# dims="xy",
# coords={"xy": ["x", "y"]})
# gCentres = xr.DataArray(gGrid,
# dims=["xy", "p1", "p2"],
# coords={"xy": ["x", "y"]}) + xyPixHalf
gTheta = np.linspace(0, np.pi / 2, 5)
gArc = array([np.cos(gTheta), np.sin(gTheta)]).T # from [1,0] to [0,1]
......@@ -107,13 +101,10 @@ class RenderTool(object):
# HACK: workaround dead-end transitions
if len(giTrans) == 0:
# print("Dead End", rcPos, iDir, tbTrans, giTrans)
iDirReverse = (iDir + 2) % 4
tbTrans = tuple(int(iDir2 == iDirReverse) for iDir2 in range(4))
giTrans = np.where(tbTrans)[0] # RC list of transitions
# print("Dead End2", rcPos, iDirReverse, tbTrans, giTrans)
# print("agent", array(list("NESW"))[giTrans], self.gTransRC[giTrans])
gTransRCAg = self.__class__.gTransRC[giTrans]