diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 9ea7d59efee6efe12a87775613a33e99a1771904..c2002409c1db263e86a184ef005d84d1e8594431 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -62,6 +62,6 @@ benchmarks_and_profiling:
     - apt update
     - apt install -y libgl1-mesa-glx xvfb graphviz xdg-utils libcairo2-dev libjpeg-dev libgif-dev
     - pip install tox
-    - xvfb-run tox -e benchmarks -v --recreate
+    - xvfb-run tox -e benchmarks,profiling -v --recreate
diff --git a/benchmarks/benchmark_all_examples.py b/benchmarks/benchmark_all_examples.py
new file mode 100644
index 0000000000000000000000000000000000000000..676bfe16ba94d53682ff6e52d7b714d8fa1c2d8b
--- /dev/null
+++ b/benchmarks/benchmark_all_examples.py
@@ -0,0 +1,36 @@
+import runpy
+import sys
+from io import StringIO
+from test.support import swap_attr
+from time import sleep
+
+import importlib_resources
+import pkg_resources
+from benchmarker import Benchmarker
+from importlib_resources import path
+
+for entry in [entry for entry in importlib_resources.contents('examples') if
+              not pkg_resources.resource_isdir('examples', entry)
+              and entry.endswith(".py")
+              and '__init__' not in entry
+              and 'demo.py' not in entry
+              ]:
+    print("*****************************************************************")
+    print("Benchmarking {}".format(entry))
+    print("*****************************************************************")
+
+    with path('examples', entry) as file_in:
+        with Benchmarker(cycle=20, extra=1) as bench:
+            @bench(entry)
+            def _(_):
+                # prevent Benchmarker from raising "ZeroDivisionError: float division by zero:
+                #   ratio = base_time / real_time"
+                sleep(0.001)
+                # In order to pipe input into examples that have input(), we use the test package,
+                # which is meant for internal use by Python only. Any use of this package outside
+                # of Python's standard library is discouraged, as its code can change or be removed
+                # without notice between releases of Python.
+                # https://docs.python.org/3/library/test.html
+                # TODO remove input() from examples?
+                with swap_attr(sys, "stdin", StringIO("q")):
+                    runpy.run_path(file_in, run_name="__main__")
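For reference, a single example can be benchmarked in isolation with the same stdin-swapping pattern used in benchmark_all_examples.py above. This is only a minimal sketch: simple_example_3.py is just an illustrative target and the cycle count is arbitrary.

    # Sketch only: benchmark one example script in isolation.
    import runpy
    import sys
    from io import StringIO
    from test.support import swap_attr
    from time import sleep

    from benchmarker import Benchmarker
    from importlib_resources import path

    with path('examples', 'simple_example_3.py') as file_in:
        with Benchmarker(cycle=5, extra=1) as bench:
            @bench('simple_example_3.py')
            def _(_):
                sleep(0.001)  # keep Benchmarker's base_time / real_time ratio finite
                with swap_attr(sys, "stdin", StringIO("q")):  # feed "q" to any input()
                    runpy.run_path(file_in, run_name="__main__")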
diff --git a/benchmarks/profile_all_examples.py b/benchmarks/profile_all_examples.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b6db571e263567e72dc0f955ce478a9384a02d6
--- /dev/null
+++ b/benchmarks/profile_all_examples.py
@@ -0,0 +1,36 @@
+import cProfile
+import runpy
+import sys
+from io import StringIO
+from test.support import swap_attr
+
+import importlib_resources
+import pkg_resources
+from importlib_resources import path
+
+
+def profile(resource, entry):
+    with path(resource, entry) as file_in:
+        # We use the test package to pipe input into examples that have input(). It is meant
+        # for internal use by Python only; any use outside of Python's standard library is
+        # discouraged, as its code can change or be removed without notice between releases.
+        # https://docs.python.org/3/library/test.html
+        # TODO remove input() from examples
+        print("*****************************************************************")
+        print("Profiling {}".format(entry))
+        print("*****************************************************************")
+        with swap_attr(sys, "stdin", StringIO("q")):
+            global my_func
+
+            def my_func(): runpy.run_path(file_in, run_name="__main__")
+
+            cProfile.run('my_func()', sort='time')
+
+
+for entry in [entry for entry in importlib_resources.contents('examples') if
+              not pkg_resources.resource_isdir('examples', entry)
+              and entry.endswith(".py")
+              and '__init__' not in entry
+              and 'demo.py' not in entry
+              ]:
+    profile('examples', entry)
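The same profile can also be collected with cProfile.Profile and pstats, which makes it easy to limit the report to the most expensive calls. A sketch, again using simple_example_3.py purely as an illustration:

    # Sketch: profile a single example and print the 20 most expensive calls.
    import cProfile
    import pstats
    import runpy
    import sys
    from io import StringIO
    from test.support import swap_attr

    from importlib_resources import path

    with path('examples', 'simple_example_3.py') as file_in:
        profiler = cProfile.Profile()
        with swap_attr(sys, "stdin", StringIO("q")):
            profiler.enable()
            runpy.run_path(file_in, run_name="__main__")
            profiler.disable()
        pstats.Stats(profiler).sort_stats('cumulative').print_stats(20)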
diff --git a/benchmarks/complex_rail_benchmark.py b/examples/complex_rail_benchmark.py
similarity index 92%
rename from benchmarks/complex_rail_benchmark.py
rename to examples/complex_rail_benchmark.py
index 1f6985dcdb374c09f5857e44c4fc484bd62d9c6e..44e4b534c2f2dfa63cab385d009c9afd92285f48 100644
--- a/benchmarks/complex_rail_benchmark.py
+++ b/examples/complex_rail_benchmark.py
@@ -2,7 +2,6 @@ import random
 
 import numpy as np
 
-from benchmarker import Benchmarker
 from flatland.envs.generators import complex_rail_generator
 from flatland.envs.rail_env import RailEnv
 
@@ -65,7 +64,4 @@ def run_benchmark():
 
 
 if __name__ == "__main__":
-    with Benchmarker(cycle=20, extra=1) as bench:
-        @bench("Everything")
-        def _(bm):
-            run_benchmark()
+    run_benchmark()
diff --git a/examples/demo.py b/examples/demo.py
index 783df56e8b8b534450031cc9e40a2bd30ef11ebd..06f8f5bd6ff998359a633682d43dba2076545eec 100644
--- a/examples/demo.py
+++ b/examples/demo.py
@@ -151,12 +151,14 @@ class Demo:
     def run_example_flatland_000():
         demo_flatland_000 = Demo(Scenario_Generator.load_scenario('example_flatland_000.pkl'))
         demo_flatland_000.renderer.resize()
+        demo_flatland_000.set_max_framerate(5)
         demo_flatland_000.run_demo(60)
 
     @staticmethod
     def run_example_flatland_001():
         demo_flatland_000 = Demo(Scenario_Generator.load_scenario('example_flatland_001.pkl'))
         demo_flatland_000.renderer.resize()
+        demo_flatland_000.set_max_framerate(5)
         demo_flatland_000.set_record_frames(os.path.join(__file_dirname__, '..', 'rendering', 'frame_{:04d}.bmp'))
         demo_flatland_000.run_demo(60)
 
diff --git a/examples/simple_example_3.py b/examples/simple_example_3.py
index 1661ef65a9a33f3b44a098caaf83317919722398..e015b3c88cf05a8d047f15dfaf88e8a2fd9ce789 100644
--- a/examples/simple_example_3.py
+++ b/examples/simple_example_3.py
@@ -2,7 +2,7 @@ import random
 
 import numpy as np
 
-from flatland.envs.generators import random_rail_generator
+from flatland.envs.generators import random_rail_generator, complex_rail_generator
 from flatland.envs.observations import TreeObsForRailEnv
 from flatland.envs.rail_env import RailEnv
 from flatland.utils.rendertools import RenderTool
diff --git a/flatland/envs/agent_utils.py b/flatland/envs/agent_utils.py
index 8e9ffb99d06176416dfbe2b65bcca09723b1a56c..aa46aecd4b69b6a13b11b63223123b16dd69e3ac 100644
--- a/flatland/envs/agent_utils.py
+++ b/flatland/envs/agent_utils.py
@@ -28,19 +28,34 @@ class EnvAgentStatic(object):
     position = attrib()
     direction = attrib()
     target = attrib()
-    moving = attrib()
-
-    def __init__(self, position, direction, target, moving=False):
+    moving = attrib(default=False)
+    # speed_data: speed is added to position_fraction on each moving step, until position_fraction >= 1.0,
+    # after which 'transition_action_on_cellexit' is executed (equivalent to executing that action in the
+    # previous cell if speed=1, as in the default case)
+    speed_data = attrib(default=dict({'position_fraction': 0.0, 'speed': 1.0, 'transition_action_on_cellexit': 0}))
+
+    def __init__(self,
+                 position,
+                 direction,
+                 target,
+                 moving=False,
+                 speed_data={'position_fraction': 0.0, 'speed': 1.0, 'transition_action_on_cellexit': 0}):
         self.position = position
         self.direction = direction
         self.target = target
         self.moving = moving
+        self.speed_data = speed_data
 
     @classmethod
-    def from_lists(cls, positions, directions, targets):
+    def from_lists(cls, positions, directions, targets, speeds=None):
         """ Create a list of EnvAgentStatics from lists of positions, directions and targets
         """
-        return list(starmap(EnvAgentStatic, zip(positions, directions, targets, [False] * len(positions))))
+        speed_datas = []
+        for i in range(len(positions)):
+            speed_datas.append({'position_fraction': 0.0,
+                                'speed': speeds[i] if speeds is not None else 1.0,
+                                'transition_action_on_cellexit': 0})
+        return list(starmap(EnvAgentStatic, zip(positions, directions, targets, [False] * len(positions), speed_datas)))
 
     def to_list(self):
@@ -54,7 +69,7 @@ class EnvAgentStatic(object):
         if type(lTarget) is np.ndarray:
             lTarget = lTarget.tolist()
 
-        return [lPos, int(self.direction), lTarget, int(self.moving)]
+        return [lPos, int(self.direction), lTarget, int(self.moving), self.speed_data]
 
 
 @attrs
@@ -78,7 +93,7 @@ class EnvAgent(EnvAgentStatic):
     def to_list(self):
         return [
             self.position, self.direction, self.target, self.handle,
-            self.old_direction, self.old_position, self.moving]
+            self.old_direction, self.old_position, self.moving, self.speed_data]
 
     @classmethod
     def from_static(cls, oStatic):
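A minimal sketch of the extended from_lists() call above. The positions, directions and targets are placeholder values chosen only for illustration; the point is the new speeds argument and the per-agent speed_data it produces.

    # Sketch: build two static agents, the second at half speed.
    from flatland.envs.agent_utils import EnvAgentStatic

    agents = EnvAgentStatic.from_lists(
        positions=[(0, 0), (0, 3)],
        directions=[1, 3],
        targets=[(4, 4), (2, 1)],
        speeds=[1.0, 0.5])  # 0.5 -> the agent needs two env steps to leave a cell

    for agent in agents:
        print(agent.position, agent.speed_data['speed'], agent.speed_data['position_fraction'])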
diff --git a/flatland/envs/generators.py b/flatland/envs/generators.py
index f644bc120d4b514f1c54e0330cfc8dc4654a4f4e..ca14667424d2c93d1466e3b7e96c2e5c1fbd41e5 100644
--- a/flatland/envs/generators.py
+++ b/flatland/envs/generators.py
@@ -18,7 +18,7 @@ def empty_rail_generator():
         rail_array = grid_map.grid
         rail_array.fill(0)
 
-        return grid_map, [], [], []
+        return grid_map, [], [], [], []
 
     return generator
@@ -75,8 +75,9 @@ def complex_rail_generator(nr_start_goal=1, nr_extra=100, min_dist=20, max_dist=
         while nr_created < nr_start_goal and created_sanity < sanity_max:
             all_ok = False
             for _ in range(sanity_max):
-                start = (np.random.randint(0, width), np.random.randint(0, height))
-                goal = (np.random.randint(0, height), np.random.randint(0, height))
+                start = (np.random.randint(0, height), np.random.randint(0, width))
+                goal = (np.random.randint(0, height), np.random.randint(0, width))
+                # check to make sure start, goal pos are empty
                 if rail_array[goal] != 0 or rail_array[start] != 0:
                     continue
@@ -121,8 +122,8 @@
         while nr_created < nr_extra and created_sanity < sanity_max:
             all_ok = False
             for _ in range(sanity_max):
-                start = (np.random.randint(0, width), np.random.randint(0, height))
-                goal = (np.random.randint(0, height), np.random.randint(0, height))
+                start = (np.random.randint(0, height), np.random.randint(0, width))
+                goal = (np.random.randint(0, height), np.random.randint(0, width))
                 # check to make sure start,goal pos are not empty
                 if rail_array[goal] == 0 or rail_array[start] == 0:
                     continue
@@ -139,7 +140,7 @@
         agents_target = [sg[1] for sg in start_goal[:num_agents]]
         agents_direction = start_dir[:num_agents]
 
-        return grid_map, agents_position, agents_direction, agents_target
+        return grid_map, agents_position, agents_direction, agents_target, [1.0]*len(agents_position)
 
     return generator
@@ -183,7 +184,7 @@ def rail_from_manual_specifications_generator(rail_spec):
             rail,
             num_agents)
 
-        return rail, agents_position, agents_direction, agents_target
+        return rail, agents_position, agents_direction, agents_target, [1.0]*len(agents_position)
 
     return generator
@@ -209,7 +210,7 @@ def rail_from_GridTransitionMap_generator(rail_map):
             rail_map,
             num_agents)
 
-        return rail_map, agents_position, agents_direction, agents_target
+        return rail_map, agents_position, agents_direction, agents_target, [1.0]*len(agents_position)
 
     return generator
@@ -482,6 +483,6 @@ def random_rail_generator(cell_type_relative_proportion=[1.0] * 11):
             return_rail,
             num_agents)
 
-        return return_rail, agents_position, agents_direction, agents_target
+        return return_rail, agents_position, agents_direction, agents_target, [1.0]*len(agents_position)
 
     return generator
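Every generator now returns a fifth element holding one speed per agent. A hedged sketch of a custom generator conforming to that contract; the inner (width, height, num_agents, num_resets) signature is assumed from the generators in this module and the helper name is hypothetical.

    # Hypothetical example generator (not part of flatland) satisfying the new 5-tuple contract.
    from flatland.core.transition_map import GridTransitionMap
    from flatland.core.transitions import RailEnvTransitions


    def my_empty_rail_generator():
        def generator(width, height, num_agents=0, num_resets=0):  # assumed signature
            rail = GridTransitionMap(width=width, height=height, transitions=RailEnvTransitions())
            rail.grid.fill(0)
            agents_position, agents_direction, agents_target = [], [], []
            agents_speed = [1.0] * len(agents_position)  # new: per-agent speeds
            return rail, agents_position, agents_direction, agents_target, agents_speed

        return generator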
diff --git a/flatland/envs/rail_env.py b/flatland/envs/rail_env.py
index c22e1c5120b54a170f9c59bb54c7666ca910f086..8cf6d52f383ec8f4e271eb0765d32bc0c763307a 100644
--- a/flatland/envs/rail_env.py
+++ b/flatland/envs/rail_env.py
@@ -73,7 +73,7 @@ class RailEnv(Environment):
             random_rail_generator : generate a random rail of given size
             rail_from_GridTransitionMap_generator(rail_map) : generate a rail from
                                                               a GridTransitionMap object
-            rail_from_manual_specifications_generator(rail_spec) : generate a rail from
+            rail_from_manual_specifications_generator(rail_spec) : generate a rail from
                                                                    a rail specifications array
             TODO: generate_rail_from_saved_list or from list of ndarray bitmaps
     ---
     width : int
@@ -101,7 +101,6 @@ class RailEnv(Environment):
         self.action_space = [1]
         self.observation_space = self.obs_builder.observation_space  # updated on resets?
 
-        self.actions = [0] * number_of_agents
         self.rewards = [0] * number_of_agents
         self.done = False
 
@@ -152,7 +151,7 @@ class RailEnv(Environment):
         self.rail = tRailAgents[0]
 
         if replace_agents:
-            self.agents_static = EnvAgentStatic.from_lists(*tRailAgents[1:4])
+            self.agents_static = EnvAgentStatic.from_lists(*tRailAgents[1:5])
 
         self.restart_agents()
 
@@ -193,28 +192,26 @@ class RailEnv(Environment):
         for iAgent in range(self.get_num_agents()):
             agent = self.agents[iAgent]
 
-            if iAgent not in action_dict:  # no action has been supplied for this agent
-                if agent.moving:
-                    # Keep moving
-                    # Change MOVE_FORWARD to DO_NOTHING
-                    action_dict[iAgent] = RailEnvActions.DO_NOTHING
-                else:
-                    action_dict[iAgent] = RailEnvActions.DO_NOTHING
-
             if self.dones[iAgent]:  # this agent has already completed...
                 continue
 
-            action = action_dict[iAgent]
-
-            if action < 0 or action > len(RailEnvActions):
-                print('ERROR: illegal action=', action,
-                      'for agent with index=', iAgent)
-                return
+            if iAgent not in action_dict:  # no action has been supplied for this agent
+                action_dict[iAgent] = RailEnvActions.DO_NOTHING
+
+            if action_dict[iAgent] < 0 or action_dict[iAgent] > len(RailEnvActions):
+                print('ERROR: illegal action=', action_dict[iAgent],
+                      'for agent with index=', iAgent,
+                      '"DO NOTHING" will be executed instead')
+                action_dict[iAgent] = RailEnvActions.DO_NOTHING
+
+            action = action_dict[iAgent]
 
             if action == RailEnvActions.DO_NOTHING and agent.moving:
                 # Keep moving
                 action = RailEnvActions.MOVE_FORWARD
 
-            if action == RailEnvActions.STOP_MOVING and agent.moving:
+            if action == RailEnvActions.STOP_MOVING and agent.moving and agent.speed_data['position_fraction'] < 0.01:
+                # Only allow halting an agent on entering new cells.
                 agent.moving = False
                 self.rewards_dict[iAgent] += stop_penalty
@@ -223,47 +220,78 @@
                 agent.moving = True
                 self.rewards_dict[iAgent] += start_penalty
 
-            if action != RailEnvActions.DO_NOTHING and action != RailEnvActions.STOP_MOVING:
-                cell_isFree, new_cell_isValid, new_direction, new_position, transition_isValid = \
-                    self._check_action_on_agent(action, agent)
-
-                if all([new_cell_isValid, transition_isValid, cell_isFree]):
-                    agent.old_direction = agent.direction
-                    agent.old_position = agent.position
-                    agent.position = new_position
-                    agent.direction = new_direction
-                else:
-                    # Logic: if the chosen action is invalid,
-                    # and it was LEFT or RIGHT, and the agent was moving, then keep moving FORWARD.
-                    if (action == RailEnvActions.MOVE_LEFT or action == RailEnvActions.MOVE_RIGHT) and agent.moving:
-                        cell_isFree, new_cell_isValid, new_direction, new_position, transition_isValid = \
-                            self._check_action_on_agent(RailEnvActions.MOVE_FORWARD, agent)
-
-                        if all([new_cell_isValid, transition_isValid, cell_isFree]):
-                            agent.old_direction = agent.direction
-                            agent.old_position = agent.position
-                            agent.position = new_position
-                            agent.direction = new_direction
+            # Now perform a movement.
+            # If the agent is in an initial position within a new cell (agent.speed_data['position_fraction'] < eps),
+            # store the desired action in 'transition_action_on_cellexit' (only if the desired transition is
+            # allowed! otherwise DO_NOTHING!)
+            # Then, in any case (if agent.moving) and if 'transition_action_on_cellexit' is valid, increment
+            # position_fraction by the speed of the agent (regardless of the action taken, as long as it is not
+            # STOP_MOVING, which sets agent.moving=False).
+            # If the new position_fraction is >= 1, reset it to 0 and perform the stored
+            # transition_action_on_cellexit.
+
+            # If the agent can make an action
+            action_selected = False
+            if agent.speed_data['position_fraction'] < 0.01:
+                if action != RailEnvActions.DO_NOTHING and action != RailEnvActions.STOP_MOVING:
+                    cell_isFree, new_cell_isValid, new_direction, new_position, transition_isValid = \
+                        self._check_action_on_agent(action, agent)
+
+                    if all([new_cell_isValid, transition_isValid, cell_isFree]):
+                        agent.speed_data['transition_action_on_cellexit'] = action
+                        action_selected = True
+
+                    else:
+                        # But, if the chosen invalid action was LEFT/RIGHT, and the agent is moving,
+                        # try to keep moving forward!
+                        if (action == RailEnvActions.MOVE_LEFT or action == RailEnvActions.MOVE_RIGHT) and agent.moving:
+                            cell_isFree, new_cell_isValid, new_direction, new_position, transition_isValid = \
+                                self._check_action_on_agent(RailEnvActions.MOVE_FORWARD, agent)
+
+                            if all([new_cell_isValid, transition_isValid, cell_isFree]):
+                                agent.speed_data['transition_action_on_cellexit'] = RailEnvActions.MOVE_FORWARD
+                                action_selected = True
+
+                            else:
+                                # TODO: an invalid action was chosen after entering the cell. The agent cannot move.
+                                self.rewards_dict[iAgent] += invalid_action_penalty
+                                agent.moving = False
+                                self.rewards_dict[iAgent] += stop_penalty
+                                continue
                         else:
-                            # the action was not valid, add penalty
+                            # TODO: an invalid action was chosen after entering the cell. The agent cannot move.
                             self.rewards_dict[iAgent] += invalid_action_penalty
+                            agent.moving = False
+                            self.rewards_dict[iAgent] += stop_penalty
+                            continue
 
-                    else:
-                        # the action was not valid, add penalty
-                        self.rewards_dict[iAgent] += invalid_action_penalty
+            if agent.moving and (action_selected or agent.speed_data['position_fraction'] >= 0.01):
+                agent.speed_data['position_fraction'] += agent.speed_data['speed']
+
+            if agent.speed_data['position_fraction'] >= 1.0:
+                agent.speed_data['position_fraction'] = 0.0
+
+                # Perform the stored action to transition to the next cell.
+
+                # 'transition_action_on_cellexit' is guaranteed to be valid here; it was checked on entering
+                # the cell.
+                cell_isFree, new_cell_isValid, new_direction, new_position, transition_isValid = \
+                    self._check_action_on_agent(agent.speed_data['transition_action_on_cellexit'], agent)
+                agent.old_direction = agent.direction
+                agent.old_position = agent.position
+                agent.position = new_position
+                agent.direction = new_direction
 
             if np.equal(agent.position, agent.target).all():
                 self.dones[iAgent] = True
             else:
-                self.rewards_dict[iAgent] += step_penalty
+                self.rewards_dict[iAgent] += step_penalty * agent.speed_data['speed']
 
         # Check for end of episode + add global reward to all rewards!
         if np.all([np.array_equal(agent2.position, agent2.target) for agent2 in self.agents]):
             self.dones["__all__"] = True
             self.rewards_dict = [0 * r + global_reward for r in self.rewards_dict]
 
-        # Reset the step actions (in case some agent doesn't 'register_action'
-        # on the next step)
-        self.actions = [0] * self.get_num_agents()
         return self._get_observations(), self.rewards_dict, self.dones, {}
 
@@ -271,6 +299,7 @@ def _check_action_on_agent(self, action, agent):
         # cell used to check for invalid actions
         new_direction, transition_isValid = self.check_action(agent, action)
         new_position = get_new_position(agent.position, new_direction)
+
         # Is it a legal move?
         # 1) transition allows the new_direction in the cell,
         # 2) the new cell is not empty (case 0),
@@ -281,11 +310,13 @@
             np.clip(new_position, [0, 0], [self.height - 1, self.width - 1])) and
             # check the new position has some transitions (ie is not an empty cell)
             self.rail.get_transitions(new_position) > 0)
+
         # If transition validity hasn't been checked yet.
         if transition_isValid is None:
             transition_isValid = self.rail.get_transition(
                 (*agent.position, agent.direction),
                 new_direction)
+
         # Check the new position is not the same as any of the existing agent positions
         # (including itself, for simplicity, since it is moving)
         cell_isFree = not np.any(
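A minimal, standalone model of the fractional-speed rule added to step() above, assuming nothing beyond the speed_data dict itself: an agent with speed 0.5 needs two steps to leave a cell, and the action chosen on entering the cell is the one executed on exit.

    # Sketch: pure-Python model of position_fraction / transition_action_on_cellexit.
    speed_data = {'position_fraction': 0.0, 'speed': 0.5, 'transition_action_on_cellexit': 0}

    def step(action, moving=True):
        if speed_data['position_fraction'] < 0.01:
            # entering a new cell: remember the (already validated) action
            speed_data['transition_action_on_cellexit'] = action
        if moving:
            speed_data['position_fraction'] += speed_data['speed']
        if speed_data['position_fraction'] >= 1.0:
            speed_data['position_fraction'] = 0.0
            return speed_data['transition_action_on_cellexit']  # executed now
        return None  # still inside the cell

    print(step(2))  # None - halfway through the cell
    print(step(2))  # 2    - cell exit, the stored action is applied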
diff --git a/flatland/utils/graphics_pil.py b/flatland/utils/graphics_pil.py
index f3a27c59c9701d23084c232cc842f79534e4401c..45733defbf19227ed90bfafb5cee0abf90e36c4e 100644
--- a/flatland/utils/graphics_pil.py
+++ b/flatland/utils/graphics_pil.py
@@ -129,7 +129,9 @@ class PILGL(GraphicsLayer):
 
     def open_window(self):
         assert self.window_open is False, "Window is already open!"
-        self.window = tk.Tk()
+        # use tk.Toplevel() instead of tk.Tk()
+        # https://stackoverflow.com/questions/26097811/image-pyimage2-doesnt-exist
+        self.window = tk.Toplevel()
         self.window.title("Flatland")
         self.window.configure(background='grey')
         self.window_open = True
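A small sketch of the reasoning behind the tk.Toplevel() change, drawn from the linked Stack Overflow thread rather than from this codebase: a second tk.Tk() starts a second Tcl interpreter, and images created against the first one then fail with "image pyimage2 doesn't exist", whereas Toplevel windows all share a single root.

    # Sketch (needs a display): several windows, one Tcl interpreter.
    import tkinter as tk

    root = tk.Tk()       # the one and only Tk interpreter
    root.withdraw()      # keep the root hidden; we only want child windows

    first = tk.Toplevel(root)
    first.title("Flatland")
    second = tk.Toplevel(root)   # any number of windows, same interpreter
    second.title("Flatland 2")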
diff --git a/notebooks/simple_example_3_manual_control.ipynb b/notebooks/simple_example_3_manual_control.ipynb
index 6527e1264af3d2a06fe40e958119dd364d86f87e..369590cf522f18d103df8e14430e81238f7cd87c 100644
--- a/notebooks/simple_example_3_manual_control.ipynb
+++ b/notebooks/simple_example_3_manual_control.ipynb
@@ -5,9 +5,12 @@
    "metadata": {},
    "source": [
     "### Simple Example 3 - Manual Control\n",
-    "This opens a separate window, and a text box in the Jupyter notebook.\n",
     "\n",
-    "eg Enter `\"0 2 s<enter>\"` to tell agent 0 to step forward, and step the environment.\n",
+    "By default this runs a few \"move forward\" actions for two agents, in a separate window.\n",
+    "\n",
+    "If you uncomment the \"input\" line below, it opens a text box in the Jupyter notebook, allowing basic manual control.\n",
+    "\n",
+    "eg Enter `\"0 2 s<enter>\"` to tell agent 0 to move forward, and step the environment.\n",
     "\n",
     "You should be able to see the red agent step forward, and get a reward from the env, looking like this:\n",
     "\n",
@@ -36,6 +39,7 @@
    "source": [
     "import random\n",
     "import numpy as np\n",
+    "import time\n",
     "from flatland.envs.generators import random_rail_generator\n",
     "from flatland.envs.observations import TreeObsForRailEnv\n",
     "from flatland.envs.rail_env import RailEnv\n",
@@ -66,12 +70,26 @@
     "# env_renderer = RenderTool(env, gl=\"PILSVG\")\n",
     "\n",
     "env_renderer.renderEnv(show=True, frames=True)\n",
-    "env_renderer.renderEnv(show=True, frames=True)\n",
     "\n",
     "print(\"Manual control: s=perform step, q=quit, [agent id] [1-2-3 action] \\\n",
-    "       (turnleft+move, move to front, turnright+move)\")\n",
-    "for step in range(100):\n",
-    "    cmd = input(\">> \")\n",
+    "       (turnleft+move, move to front, turnright+move)\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for step in range(10):\n",
+    "\n",
+    "    # This is an example command, setting agent 0's action to 2 (move forward), and agent 1's action to 2, \n",
+    "    # then stepping the environment.\n",
+    "    cmd = \"0 2 1 2 s\"\n",
+    "    \n",
+    "    # uncomment this input statement if you want to try interactive manual commands\n",
+    "    # cmd = input(\">> \")\n",
+    "    \n",
     "    cmds = cmd.split(\" \")\n",
     "\n",
     "    action_dict = {}\n",
@@ -93,7 +111,9 @@
     "            i = i + 1\n",
     "        i += 1\n",
     "\n",
-    "    env_renderer.renderEnv(show=True, frames=True)"
+    "    env_renderer.renderEnv(show=True, frames=True)\n",
+    "    \n",
+    "    time.sleep(0.3)"
    ]
   }
  ],
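A sketch of how a command string like the notebook's "0 2 1 2 s" decomposes: pairs of "<agent id> <action>" tokens build the action_dict, and a trailing "s" steps the environment. Here env.step() is replaced by a print so the snippet stands alone.

    # Sketch: parse a manual-control command string into an action_dict.
    cmd = "0 2 1 2 s"
    action_dict = {}

    tokens = cmd.split(" ")
    i = 0
    while i < len(tokens):
        if tokens[i] == "s":
            print("step with", action_dict)   # env.step(action_dict) in the notebook
            action_dict = {}
            i += 1
        else:
            agent_id, action = int(tokens[i]), int(tokens[i + 1])
            action_dict[agent_id] = action
            i += 2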
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/test_environments.py b/tests/test_environments.py
index 11f0acba2fd54df63c62047f8559897e7d222e72..aa24467dd1d548a2b68a408f300089ee8135c639 100644
--- a/tests/test_environments.py
+++ b/tests/test_environments.py
@@ -3,7 +3,7 @@ import numpy as np
 
 from flatland.core.transition_map import GridTransitionMap
-from flatland.core.transitions import Grid4Transitions
+from flatland.core.transitions import Grid4Transitions, RailEnvTransitions
 from flatland.envs.agent_utils import EnvAgent
 from flatland.envs.generators import complex_rail_generator
 from flatland.envs.generators import rail_from_GridTransitionMap_generator
@@ -53,7 +53,7 @@ def test_rail_environment_single_agent():
     #   |  |  |
     #    \_/\_/
 
-    transitions = Grid4Transitions([])
+    transitions = RailEnvTransitions()
     vertical_line = cells[1]
     south_symmetrical_switch = cells[6]
     north_symmetrical_switch = transitions.rotate_transition(south_symmetrical_switch, 180)
@@ -107,6 +107,7 @@ def test_rail_environment_single_agent():
                 if prev_pos != pos:
                     valid_active_actions_done += 1
 
+
             # After 6 movements on this railway network, the train should be back
             # to its original height on the map.
             assert (initial_pos[0] == agent.position[0])
@@ -121,9 +122,9 @@ def test_rail_environment_single_agent():
             action = np.random.randint(4)
             _, _, dones, _ = rail_env.step({0: action})
 
-        done = dones['__all__']
 
+test_rail_environment_single_agent()
 
 
 def test_dead_end():
     transitions = Grid4Transitions([])
diff --git a/tox.ini b/tox.ini
index 61f8b0a9427b8d09e51540c7e047026c56466693..b64a20a8db0e6e3df846a0c05bd1e372f93f9136 100644
--- a/tox.ini
+++ b/tox.ini
@@ -60,7 +60,23 @@ deps =
     -r{toxinidir}/requirements_dev.txt
    -r{toxinidir}/requirements_continuous_integration.txt
 commands =
-    sh -c 'ls benchmarks/*.py | xargs -n 1 python'
+    python benchmarks/benchmark_all_examples.py
+
+[testenv:profiling]
+basepython = python
+setenv =
+    PYTHONPATH = {toxinidir}
+passenv =
+    DISPLAY
+; HTTP_PROXY+HTTPS_PROXY required behind corporate proxies
+    HTTP_PROXY
+    HTTPS_PROXY
+whitelist_externals = sh
+deps =
+    -r{toxinidir}/requirements_dev.txt
+    -r{toxinidir}/requirements_continuous_integration.txt
+commands =
+    python benchmarks/profile_all_examples.py
 
 [testenv:examples]
 basepython = python