text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_transform(self) -> str: """ Actual is measured data Expected is based on mechanical drawings of the robot This method computes the transformation matrix from actual -> expected. Saves this transform to disc. """ |
# Gather the (x, y) components of calibration points 1-3; z is carried
# over separately from the existing calibration matrix below.
expected = [self._expected_points[p][:2] for p in [1, 2, 3]]
log.debug("save_transform expected: {}".format(expected))
actual = [self.actual_points[p][:2] for p in [1, 2, 3]]
log.debug("save_transform actual: {}".format(actual))
# Generate a 2 dimensional transform matrix from the two matrices
flat_matrix = solve(expected, actual)
log.debug("save_transform flat_matrix: {}".format(flat_matrix))
# Preserve the current z offset from the existing calibration matrix
current_z = self.calibration_matrix[2][3]
# Add the z component to form the 3 dimensional transform
self.calibration_matrix = add_z(flat_matrix, current_z)
# Convert matrix rows to plain lists so the config is serializable
gantry_calibration = list(
    map(lambda i: list(i), self.calibration_matrix))
log.debug("save_transform calibration_matrix: {}".format(
    gantry_calibration))
self.hardware.update_config(gantry_calibration=gantry_calibration)
res = str(self.hardware.config)
# Persist to disk and return both the config repr and the save result
return '{}\n{}'.format(res, save_config(self.hardware.config)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_config(prefix: str) -> str: """ Find the most recent config matching `prefix` """ |
# All known config model names that share the requested prefix
matches = [conf for conf in config_models if conf.startswith(prefix)]
if not matches:
    raise KeyError('No match found for prefix {}'.format(prefix))
# An exact match wins outright; otherwise return the lexically-first
# matching model name (the convention documented by callers).
if prefix in matches:
    return prefix
else:
    return sorted(matches)[0] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_attached_instruments( self, expected: Dict[types.Mount, str])\ -> Dict[types.Mount, Dict[str, Optional[str]]]: """ Update the internal cache of attached instruments. This method allows after-init-time specification of attached simulated instruments. The method will return - the instruments specified at init-time, or if those do not exist, - the instruments specified in expected, or if that is not passed, - nothing :param expected: A mapping of mount to instrument model prefixes. When loading instruments from a prefix, we return the lexically-first model that matches the prefix. If the models specified in expected do not match the models specified in the `attached_instruments` argument of :py:meth:`__init__`, :py:class:`RuntimeError` is raised. :raises RuntimeError: If an instrument is expected but not found. :returns: A dict of mount to either instrument model names or `None`. """ |
to_return: Dict[types.Mount, Dict[str, Optional[str]]] = {}
for mount in types.Mount:
    expected_instr = expected.get(mount, None)
    init_instr = self._attached_instruments.get(mount, {})
    found_model = init_instr.get('model', '')
    # Case 1: expected and detected disagree (detected model does not
    # start with the expected prefix)
    if expected_instr and found_model\
            and not found_model.startswith(expected_instr):
        if self._strict_attached:
            raise RuntimeError(
                'mount {}: expected instrument {} but got {}'
                .format(mount.name, expected_instr, init_instr))
        else:
            # Non-strict mode: report the expected instrument anyway,
            # resolved through find_config, with no serial id
            to_return[mount] = {
                'model': find_config(expected_instr),
                'id': None}
    elif found_model and expected_instr:
        # Instrument detected matches instrument expected (note:
        # "instrument detected" means passed as an argument to the
        # constructor of this class)
        to_return[mount] = init_instr
    elif found_model:
        # Instrument detected and no expected instrument specified
        to_return[mount] = init_instr
    elif expected_instr:
        # Expected instrument specified and no instrument detected
        to_return[mount] = {
            'model': find_config(expected_instr),
            'id': None}
    else:
        # No instrument detected or expected
        to_return[mount] = {
            'model': None,
            'id': None}
return to_return |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _ensure_programmer_executable():
""" Find the lpc21isp executable and ensure it is executable """ |
# Find the lpc21isp executable, explicitly allowing the case where it
# is not executable (since that's exactly what we're trying to fix)
updater_executable = shutil.which('lpc21isp',
                                  mode=os.F_OK)
# updater_executable might be None; we're passing it here unchecked
# because if it is None, we're about to fail when we try to program
# the smoothie, and we want the exception to bubble up.
# 0o777 grants read/write/execute to everyone so the updater can run.
os.chmod(updater_executable, 0o777) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def restart(request):
""" Returns OK, then waits approximately 1 second and restarts container """ |
def wait_and_restart():
    """Wait briefly so the HTTP response can flush, then signal PID 1
    so the container supervisor restarts us."""
    log.info('Restarting server')
    sleep(1)
    os.system('kill 1')
# Run the restart on a background thread so this handler returns first
Thread(target=wait_and_restart).start()
return web.json_response({"message": "restarting"}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_app(system_version_file: str = None, config_file_override: str = None, name_override: str = None, loop: asyncio.AbstractEventLoop = None) -> web.Application: """ Build and return the aiohttp.web.Application that runs the server The params can be overloaded for testing. """ |
# Fall back to the buildroot-builtin version file when none is supplied
if not system_version_file:
    system_version_file = BR_BUILTIN_VERSION_FILE
version = get_version(system_version_file)
name = name_override or name_management.get_name()
config_obj = config.load(config_file_override)
# NOTE(review): the "(from git sha" fragments below never close their
# parenthesis in the logged output -- confirm whether that's intended
LOG.info("Setup: " + '\n\t'.join([
    f'Device name: {name}',
    f'Buildroot version: '
    f'{version.get("buildroot_version", "unknown")}',
    f'\t(from git sha '
    f'{version.get("buildroot_sha", "unknown")}',
    f'API version: '
    f'{version.get("opentrons_api_version", "unknown")}',
    f'\t(from git sha '
    f'{version.get("opentrons_api_sha", "unknown")}',
    f'Update server version: '
    f'{version.get("update_server_version", "unknown")}',
    f'\t(from git sha '
    f'{version.get("update_server_sha", "unknown")}',
    f'Smoothie firmware version: TODO'
]))
if not loop:
    loop = asyncio.get_event_loop()
# Shared application state: config object, restart lock, device name
app = web.Application(loop=loop, middlewares=[log_error_middleware])
app[config.CONFIG_VARNAME] = config_obj
app[constants.RESTART_LOCK_NAME] = asyncio.Lock()
app[constants.DEVICE_NAME_VARNAME] = name
# Route table: update sessions, restart, ssh key management, naming
app.router.add_routes([
    web.get('/server/update/health',
            control.build_health_endpoint(version)),
    web.post('/server/update/begin', update.begin),
    web.post('/server/update/cancel', update.cancel),
    web.get('/server/update/{session}/status', update.status),
    web.post('/server/update/{session}/file', update.file_upload),
    web.post('/server/update/{session}/commit', update.commit),
    web.post('/server/restart', control.restart),
    web.get('/server/ssh_keys', ssh_key_management.list_keys),
    web.post('/server/ssh_keys', ssh_key_management.add),
    web.delete('/server/ssh_keys/{key_md5}', ssh_key_management.remove),
    web.post('/server/name', name_management.set_name_endpoint),
    web.get('/server/name', name_management.get_name_endpoint),
])
return app |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def on_shutdown(self, app):
""" Graceful shutdown handler See https://docs.aiohttp.org/en/stable/web.html#graceful-shutdown """ |
# Iterate over a copy: closing a websocket may mutate self.clients
for ws in self.clients.copy():
    await ws.close(code=WSCloseCode.GOING_AWAY,
                   message='Server shutdown')
# Finish local cleanup once every client connection is closed
self.shutdown() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def handler(self, request):
""" Receives HTTP request and negotiates up to a Websocket session """ |
def task_done(future):
self.tasks.remove(future)
exception = future.exception()
if exception:
log.warning(
'While processing message: {0}\nDetails: {1}'.format(
exception,
traceback.format_exc())
)
client = web.WebSocketResponse()
client_id = id(client)
# upgrade to Websockets
await client.prepare(request)
log.info('Opening Websocket {0}'.format(id(client)))
log.debug('Tasks: {0}'.format(self.tasks))
log.debug('Clients: {0}'.format(self.clients))
try:
log.debug('Sending root info to {0}'.format(client_id))
await client.send_json({
'$': {'type': CONTROL_MESSAGE, 'monitor': True},
'root': self.call_and_serialize(lambda: self.root),
'type': self.call_and_serialize(lambda: type(self.root))
})
log.debug('Root info sent to {0}'.format(client_id))
except Exception:
log.exception('While sending root info to {0}'.format(client_id))
try:
self.clients[client] = self.send_worker(client)
# Async receive client data until websocket is closed
async for msg in client:
task = self.loop.create_task(self.process(msg))
task.add_done_callback(task_done)
self.tasks += [task]
except Exception:
log.exception('While reading from socket:')
finally:
log.info('Closing WebSocket {0}'.format(id(client)))
await client.close()
del self.clients[client]
return client |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve_args(self, args):
""" Resolve function call arguments that have object ids into instances of these objects """ |
def resolve(a):
    """Resolve one argument: dicts may be serialized object references
    ('i' = object id, 'v' = inline value); sequences resolve deeply."""
    if isinstance(a, dict):
        _id = a.get('i', None)
        # If it's a compound type (including dict)
        # Check if it has id (i) to determine that it has
        # a reference in object storage. If it's None, then it's
        # a dict originated at the remote
        # NOTE(review): a falsy-but-valid id (e.g. 0) would also take
        # the a['v'] branch -- confirm ids are never falsy.
        return self.objects[_id] if _id else a['v']
    # if array, resolve its elements
    if isinstance(a, (list, tuple)):
        return [resolve(i) for i in a]
    # Primitive values pass through unchanged
    return a
return [resolve(a) for a in args] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def build_hardware_controller( cls, config: robot_configs.robot_config = None, port: str = None, loop: asyncio.AbstractEventLoop = None, force: bool = False) -> 'API': """ Build a hardware controller that will actually talk to hardware. This method should not be used outside of a real robot, and on a real robot only one true hardware controller may be active at one time. :param config: A config to preload. If not specified, load the default. :param port: A port to connect to. If not specified, the default port (found by scanning for connected FT232Rs). :param loop: An event loop to use. If not specified, use the result of :py:meth:`asyncio.get_event_loop`. :param force: If `True`, connect even if a lockfile is present. See :py:meth:`Controller.__init__`. """ |
if None is Controller:
raise RuntimeError(
'The hardware controller may only be instantiated on a robot')
checked_loop = loop or asyncio.get_event_loop()
backend = Controller(config, checked_loop, force=force)
await backend.connect(port)
return cls(backend, config=config, loop=checked_loop) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build_hardware_simulator( cls, attached_instruments: Dict[top_types.Mount, Dict[str, Optional[str]]] = None, # noqa E501 attached_modules: List[str] = None, config: robot_configs.robot_config = None, loop: asyncio.AbstractEventLoop = None, strict_attached_instruments: bool = True) -> 'API': """ Build a simulating hardware controller. This method may be used both on a real robot and on dev machines. Multiple simulating hardware controllers may be active at one time. """ |
if None is attached_instruments:
attached_instruments = {}
if None is attached_modules:
attached_modules = []
return cls(Simulator(attached_instruments,
attached_modules,
config, loop,
strict_attached_instruments),
config=config, loop=loop) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def register_callback(self, cb):
""" Allows the caller to register a callback, and returns a closure that can be used to unregister the provided callback """ |
# _callbacks is a set, so registering the same callback twice is a no-op
self._callbacks.add(cb)

def unregister():
    """Remove the callback registered by the enclosing call."""
    self._callbacks.remove(cb)

return unregister |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_lights(self, button: bool = None, rails: bool = None):
""" Control the robot lights. :param button: If specified, turn the button light on (`True`) or off (`False`). If not specified, do not change the button light. :param rails: If specified, turn the rail lights on (`True`) or off (`False`). If not specified, do not change the rail lights. """ |
# Delegate to the backend; per the docstring, None means "no change"
self._backend.set_lights(button, rails) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def identify(self, duration_s: int = 5):
""" Blink the button light to identify the robot. :param int duration_s: The duration to blink for, in seconds. """ |
count = duration_s * 4
on = False
for sec in range(count):
then = self._loop.time()
self.set_lights(button=on)
on = not on
now = self._loop.time()
await asyncio.sleep(max(0, 0.25-(now-then)))
self.set_lights(button=True) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def cache_instruments(self, require: Dict[top_types.Mount, str] = None):
""" - Get the attached instrument on each mount and - Cache their pipette configs from pipette-config.json If specified, the require element should be a dict of mounts to instrument models describing the instruments expected to be present. This can save a subsequent of :py:attr:`attached_instruments` and also serves as the hook for the hardware simulator to decide what is attached. """ |
# Treat a missing require argument as "no expectations"
checked_require = require or {}
self._log.info("Updating instrument model cache")
found = self._backend.get_attached_instruments(checked_require)
for mount, instrument_data in found.items():
    model = instrument_data.get('model')
    if model is not None:
        # Build a Pipette from the model, the configured per-mount
        # offset, and the backend-reported serial id
        p = Pipette(model,
                    self._config.instrument_offset[mount.name.lower()],
                    instrument_data['id'])
        self._attached_instruments[mount] = p
    else:
        # Nothing attached on this mount
        self._attached_instruments[mount] = None
mod_log.info("Instruments found: {}".format(
    self._attached_instruments)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def update_firmware( self, firmware_file: str, loop: asyncio.AbstractEventLoop = None, explicit_modeset: bool = True) -> str: """ Update the firmware on the Smoothie board. :param firmware_file: The path to the firmware file. :param explicit_modeset: `True` to force the smoothie into programming mode; `False` to assume it is already in programming mode. :param loop: An asyncio event loop to use; if not specified, the one associated with this instance will be used. :returns: The stdout of the tool used to update the smoothie """ |
if None is loop:
checked_loop = self._loop
else:
checked_loop = loop
return await self._backend.update_firmware(firmware_file,
checked_loop,
explicit_modeset) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def home_z(self, mount: top_types.Mount = None):
""" Home the two z-axes """ |
if not mount:
axes = [Axis.Z, Axis.A]
else:
axes = [Axis.by_mount(mount)]
await self.home(axes) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def home_plunger(self, mount: top_types.Mount):
""" Home the plunger motor for a mount, and then return it to the 'bottom' position. :param mount: the mount associated with the target plunger :type mount: :py:class:`.top_types.Mount` """ |
instr = self._attached_instruments[mount]
# No-op when there is no instrument on the mount
if instr:
    await self.home([Axis.of_plunger(mount)])
    # After homing, park the plunger at its configured 'bottom'
    await self._move_plunger(mount,
                             instr.config.bottom) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _deck_from_smoothie( self, smoothie_pos: Dict[str, float]) -> Dict[Axis, float]: """ Build a deck-abs position store from the smoothie's position This should take the smoothie style position {'X': float, etc} and turn it into the position dict used here {Axis.X: float} in deck-absolute coordinates. It runs the reverse deck transformation for the axes that require it. One piece of complexity is that if the gantry transformation includes a transition between non parallel planes, the z position of the left mount would depend on its actual position in deck frame, so we have to apply the mount offset. TODO: Figure out which frame the mount offset is measured in, because if it's measured in the deck frame (e.g. by touching off points on the deck) it has to go through the reverse transform to be added to the smoothie coordinates here. """ |
# Re-key from smoothie axis letters ('X', 'Y', ...) to Axis members
with_enum = {Axis[k]: v for k, v in smoothie_pos.items()}
# Plunger axes are not part of the gantry transform; pass them through
plunger_axes = {k: v for k, v in with_enum.items()
                if k not in Axis.gantry_axes()}
right = (with_enum[Axis.X], with_enum[Axis.Y],
         with_enum[Axis.by_mount(top_types.Mount.RIGHT)])
# Tell apply_transform to just do the change of base part of the
# transform rather than the full affine transform, because this is
# an offset
left = (with_enum[Axis.X],
        with_enum[Axis.Y],
        with_enum[Axis.by_mount(top_types.Mount.LEFT)])
right_deck = linal.apply_reverse(self.config.gantry_calibration,
                                 right)
left_deck = linal.apply_reverse(self.config.gantry_calibration,
                                left)
# x/y come from the right mount's transform result; each mount keeps
# its own z
deck_pos = {Axis.X: right_deck[0],
            Axis.Y: right_deck[1],
            Axis.by_mount(top_types.Mount.RIGHT): right_deck[2],
            Axis.by_mount(top_types.Mount.LEFT): left_deck[2]}
deck_pos.update(plunger_axes)
return deck_pos |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def gantry_position( self, mount: top_types.Mount, critical_point: CriticalPoint = None) -> top_types.Point: """ Return the position of the critical point as pertains to the gantry This ignores the plunger position and gives the Z-axis a predictable name (as :py:attr:`.Point.z`). `critical_point` specifies an override to the current critical point to use (see :py:meth:`current_position`). """ |
cur_pos = await self.current_position(mount, critical_point)
# Collapse the per-axis dict to an (x, y, z) Point; the mount's own
# z axis becomes Point.z
return top_types.Point(x=cur_pos[Axis.X],
                       y=cur_pos[Axis.Y],
                       z=cur_pos[Axis.by_mount(mount)]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def move_to( self, mount: top_types.Mount, abs_position: top_types.Point, speed: float = None, critical_point: CriticalPoint = None):
""" Move the critical point of the specified mount to a location relative to the deck, at the specified speed. 'speed' sets the speed of all robot axes to the given value. So, if multiple axes are to be moved, they will do so at the same speed The critical point of the mount depends on the current status of the mount: - If the mount does not have anything attached, its critical point is the bottom of the mount attach bracket. - If the mount has a pipette attached and it is not known to have a pipette tip, the critical point is the end of the nozzle of a single pipette or the end of the backmost nozzle of a multipipette - If the mount has a pipette attached and it is known to have a pipette tip, the critical point is the end of the pipette tip for a single pipette or the end of the tip of the backmost nozzle of a multipipette :param mount: The mount to move :param abs_position: The target absolute position in :ref:`protocol-api-deck-coords` to move the critical point to :param speed: An overall head speed to use during the move :param critical_point: The critical point to move. In most situations this is not needed. If not specified, the current critical point will be moved. If specified, the critical point must be one that actually exists - that is, specifying :py:attr:`.CriticalPoint.NOZZLE` when no pipette is attached or :py:attr:`.CriticalPoint.TIP` when no tip is applied will result in an error. """ |
# Absolute moves are only valid once the position cache is populated
# (i.e. after a home)
if not self._current_position:
    raise MustHomeError
await self._cache_and_maybe_retract_mount(mount)
z_axis = Axis.by_mount(mount)
# Only the left mount carries a configured offset; the right mount is
# the reference frame
if mount == top_types.Mount.LEFT:
    offset = top_types.Point(*self.config.mount_offset)
else:
    offset = top_types.Point(0, 0, 0)
cp = self._critical_point_for(mount, critical_point)
# Subtract the mount offset and critical point so the commanded axis
# targets land the critical point at abs_position
target_position = OrderedDict(
    ((Axis.X, abs_position.x - offset.x - cp.x),
     (Axis.Y, abs_position.y - offset.y - cp.y),
     (z_axis, abs_position.z - offset.z - cp.z))
)
await self._move(target_position, speed=speed) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def move_rel(self, mount: top_types.Mount, delta: top_types.Point, speed: float = None):
""" Move the critical point of the specified mount by a specified displacement in a specified direction, at the specified speed. 'speed' sets the speed of all axes to the given value. So, if multiple axes are to be moved, they will do so at the same speed """ |
# Relative moves require a known current position
if not self._current_position:
    raise MustHomeError
await self._cache_and_maybe_retract_mount(mount)
z_axis = Axis.by_mount(mount)
try:
    # Offset each axis of the cached position by the requested delta
    target_position = OrderedDict(
        ((Axis.X,
          self._current_position[Axis.X] + delta.x),
         (Axis.Y,
          self._current_position[Axis.Y] + delta.y),
         (z_axis,
          self._current_position[z_axis] + delta.z))
    )
except KeyError:
    # A missing axis in the cache also means we have not homed
    raise MustHomeError
await self._move(target_position, speed=speed) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def _cache_and_maybe_retract_mount(self, mount: top_types.Mount):
""" Retract the 'other' mount if necessary If `mount` does not match the value in :py:attr:`_last_moved_mount` (and :py:attr:`_last_moved_mount` exists) then retract the mount in :py:attr:`_last_moved_mount`. Also unconditionally update :py:attr:`_last_moved_mount` to contain `mount`. """ |
# Retract the previously-used mount before moving a different one;
# 10 is the retract margin passed through to retract()
if mount != self._last_moved_mount and self._last_moved_mount:
    await self.retract(self._last_moved_mount, 10)
self._last_moved_mount = mount |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def _move(self, target_position: 'OrderedDict[Axis, float]', speed: float = None, home_flagged_axes: bool = True):
""" Worker function to apply robot motion. Robot motion means the kind of motions that are relevant to the robot, i.e. only one pipette plunger and mount move at the same time, and an XYZ move in the coordinate frame of one of the pipettes. ``target_position`` should be an ordered dict (ordered by XYZABC) of deck calibrated values, containing any specified XY motion and at most one of a ZA or BC components. The frame in which to move is identified by the presence of (ZA) or (BC). """ |
# Transform only the x, y, and (z or a) axes specified since this could
# get the b or c axes as well
to_transform = tuple((tp
                      for ax, tp in target_position.items()
                      if ax in Axis.gantry_axes()))
# Pre-fill the dict we'll send to the backend with the axes we don't
# need to transform
smoothie_pos = {ax.name: pos for ax, pos in target_position.items()
                if ax not in Axis.gantry_axes()}
# We'd better have all of (x, y, (z or a)) or none of them since the
# gantry transform requires them all
if len(to_transform) != 3:
    self._log.error("Move derived {} axes to transform from {}"
                    .format(len(to_transform), target_position))
    raise ValueError("Moves must specify either exactly an x, y, and "
                     "(z or a) or none of them")
# Type ignored below because linal.apply_transform (rightly) specifies
# Tuple[float, float, float] and the implied type from
# target_position.items() is (rightly) Tuple[float, ...] with unbounded
# size; unfortunately, mypy can't quite figure out the length check
# above that makes this OK
transformed = linal.apply_transform(  # type: ignore
    self.config.gantry_calibration, to_transform)
# Since target_position is an OrderedDict with the axes ordered by
# (x, y, z, a, b, c), and we'll only have one of a or z (as checked
# by the len(to_transform) check above) we can use an enumerate to
# fuse the specified axes and the transformed values back together.
# While we do this iteration, we'll also check axis bounds.
bounds = self._backend.axis_bounds
for idx, ax in enumerate(target_position.keys()):
    if ax in Axis.gantry_axes():
        smoothie_pos[ax.name] = transformed[idx]
        if smoothie_pos[ax.name] < bounds[ax.name][0]\
                or smoothie_pos[ax.name] > bounds[ax.name][1]:
            # Out of bounds: warn, but the move below still runs
            deck_mins = self._deck_from_smoothie({ax: bound[0]
                                                  for ax, bound
                                                  in bounds.items()})
            deck_max = self._deck_from_smoothie({ax: bound[1]
                                                 for ax, bound
                                                 in bounds.items()})
            # NOTE(review): the adjacent string literals below render
            # as "not inlimits" -- a space is missing at the join.
            self._log.warning(
                "Out of bounds move: {}={} (transformed: {}) not in"
                "limits ({}, {}) (transformed: ({}, {})"
                .format(ax.name,
                        target_position[ax],
                        smoothie_pos[ax.name],
                        deck_mins[ax], deck_max[ax],
                        bounds[ax.name][0], bounds[ax.name][1]))
async with self._motion_lock:
    try:
        self._backend.move(smoothie_pos, speed=speed,
                           home_flagged_axes=home_flagged_axes)
    except Exception:
        self._log.exception('Move failed')
        # Position is unknown after a failed move: clear the cache so
        # the next motion call raises MustHomeError
        self._current_position.clear()
        raise
    else:
        self._current_position.update(target_position) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def engaged_axes(self) -> Dict[Axis, bool]: """ Which axes are engaged and holding. """ |
# Re-key the backend's axis-letter dict onto Axis enum members
return {Axis[ax]: eng
        for ax, eng in self._backend.engaged_axes().items()} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def retract(self, mount: top_types.Mount, margin: float):
""" Pull the specified mount up to its home position. Works regardless of critical point or home status. """ |
# The backend expects upper-case smoothie axis letters
smoothie_ax = Axis.by_mount(mount).name.upper()
async with self._motion_lock:
    smoothie_pos = self._backend.fast_home(smoothie_ax, margin)
    # Re-derive the deck-frame position cache from the post-home pose
    self._current_position = self._deck_from_smoothie(smoothie_pos) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _critical_point_for( self, mount: top_types.Mount, cp_override: CriticalPoint = None) -> top_types.Point: """ Return the current critical point of the specified mount. The mount's critical point is the position of the mount itself, if no pipette is attached, or the pipette's critical point (which depends on tip status). If `cp_override` is specified, and that critical point actually exists, it will be used instead. Invalid `cp_override`s are ignored. """ |
pip = self._attached_instruments[mount]
# CriticalPoint.MOUNT explicitly requests the mount itself, even when
# a pipette is attached
if pip is not None and cp_override != CriticalPoint.MOUNT:
    return pip.critical_point(cp_override)
else:
    # TODO: The smoothie's z/a home position is calculated to provide
    # the offset for a P300 single. Here we should decide whether we
    # implicitly accept this as correct (by returning a null offset)
    # or not (by returning an offset calculated to move back up the
    # length of the P300 single).
    return top_types.Point(0, 0, 0) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def blow_out(self, mount):
""" Force any remaining liquid to dispense. The liquid will be dispensed at the current location of pipette """ |
this_pipette = self._attached_instruments[mount]
if not this_pipette:
    raise top_types.PipetteNotAttachedError(
        "No pipette attached to {} mount".format(mount.name))
# Set the plunger current before driving to the blow-out position
self._backend.set_active_current(Axis.of_plunger(mount),
                                 this_pipette.config.plunger_current)
try:
    await self._move_plunger(
        mount, this_pipette.config.blow_out)
except Exception:
    self._log.exception('Blow out failed')
    raise
finally:
    # The tracked volume is zeroed whether or not the move succeeded
    this_pipette.set_current_volume(0) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def pick_up_tip(self, mount, tip_length: float, presses: int = None, increment: float = None):
""" Pick up tip from current location. If ``presses`` or ``increment`` is not specified (or is ``None``), their value is taken from the pipette configuration """ |
instr = self._attached_instruments[mount]
assert instr
assert not instr.has_tip, 'Tip already attached'
instr_ax = Axis.by_mount(mount)
plunger_ax = Axis.of_plunger(mount)
self._log.info('Picking up tip on {}'.format(instr.name))
# Initialize plunger to bottom position
self._backend.set_active_current(plunger_ax,
                                 instr.config.plunger_current)
await self._move_plunger(
    mount, instr.config.bottom)
# Fall back to configured values when presses/increment are unset or
# negative
if not presses or presses < 0:
    checked_presses = instr.config.pick_up_presses
else:
    checked_presses = presses
if not increment or increment < 0:
    checked_increment = instr.config.pick_up_increment
else:
    checked_increment = increment
# Press the nozzle into the tip <presses> number of times,
# moving further by <increment> mm after each press
for i in range(checked_presses):
    # move nozzle down into the tip
    with self._backend.save_current():
        self._backend.set_active_current(instr_ax,
                                         instr.config.pick_up_current)
        dist = -1.0 * instr.config.pick_up_distance\
            + -1.0 * checked_increment * i
        target_pos = top_types.Point(0, 0, dist)
        await self.move_rel(
            mount, target_pos, instr.config.pick_up_speed)
    # move nozzle back up
    backup_pos = top_types.Point(0, 0, -dist)
    await self.move_rel(mount, backup_pos)
instr.add_tip(tip_length=tip_length)
instr.set_current_volume(0)
# neighboring tips tend to get stuck in the space between
# the volume chamber and the drop-tip sleeve on p1000.
# This extra shake ensures those tips are removed
if 'needs-pickup-shake' in instr.config.quirks:
    await self._shake_off_tips(mount)
    await self._shake_off_tips(mount)
await self.retract(mount, instr.config.pick_up_distance) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def drop_tip(self, mount, home_after=True):
""" Drop tip at the current location :param Mount mount: The mount to drop a tip from :param bool home_after: Home the plunger motor after dropping tip. This is used in case the plunger motor skipped while dropping the tip, and is also used to recover the ejector shroud after a drop. """ |
instr = self._attached_instruments[mount]
assert instr
assert instr.has_tip, 'Cannot drop tip without a tip attached'
self._log.info("Dropping tip off from {}".format(instr.name))
plunger_ax = Axis.of_plunger(mount)
droptip = instr.config.drop_tip
bottom = instr.config.bottom
# First move to the bottom position at the normal plunger current
self._backend.set_active_current(plunger_ax,
                                 instr.config.plunger_current)
await self._move_plunger(mount, bottom)
# Then push through to the drop position at the dedicated drop-tip
# current and speed
self._backend.set_active_current(plunger_ax,
                                 instr.config.drop_tip_current)
await self._move_plunger(
    mount, droptip, speed=instr.config.drop_tip_speed)
await self._shake_off_tips(mount)
self._backend.set_active_current(plunger_ax,
                                 instr.config.plunger_current)
instr.set_current_volume(0)
instr.remove_tip()
if home_after:
    # Fast-home the plunger with a margin covering the drop stroke in
    # case the motor skipped, then restore a known plunger position
    safety_margin = abs(bottom-droptip)
    async with self._motion_lock:
        smoothie_pos = self._backend.fast_home(
            plunger_ax.name.upper(), safety_margin)
        self._current_position = self._deck_from_smoothie(smoothie_pos)
    await self._move_plunger(mount, safety_margin) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def update_module( self, module: modules.AbstractModule, firmware_file: str, loop: asyncio.AbstractEventLoop = None) -> Tuple[bool, str]: """ Update a module's firmware. Returns (ok, message) where ok is True if the update succeeded and message is a human readable message. """ |
details = (module.port, module.name())
mod = self._attached_modules.pop(details[0] + details[1])
try:
new_mod = await self._backend.update_module(
mod, firmware_file, loop)
except modules.UpdateError as e:
return False, e.msg
else:
new_details = new_mod.port + new_mod.device_info['model']
self._attached_modules[new_details] = new_mod
return True, 'firmware update successful' |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_instrument_offset(self, mount, new_offset: top_types.Point = None, from_tip_probe: top_types.Point = None):
""" Update the instrument offset for a pipette on the specified mount. This will update both the stored value in the robot settings and the live value in the currently-loaded pipette. This can be specified either directly by using the new_offset arg or using the result of a previous call to :py:meth:`locate_tip_probe_center` with the same mount. :note: Z differences in the instrument offset cannot be disambiguated between differences in the position of the nozzle and differences in the length of the nozzle/tip interface (assuming that tips are of reasonably uniform length). For this reason, they are saved as adjustments to the nozzle interface length and only applied when a tip is present. """ |
# Derive the offset from a tip-probe result when one is supplied;
# otherwise the caller must have passed new_offset explicitly.
if from_tip_probe:
    probe_center = top_types.Point(*self._config.tip_probe.center)
    new_offset = probe_center - from_tip_probe
elif not new_offset:
    raise ValueError(
        "Either from_tip_probe or new_offset must be specified")
pip = self._attached_instruments[mount]
assert pip, '{} has no pipette'.format(mount.name.lower())
# Persist into the robot config under mount name and pipette type
# ('single' or 'multi'), then push the value to the live pipette.
inst_offs = self._config.instrument_offset
kind = 'multi' if pip.config.channels > 1 else 'single'
inst_offs[mount.name.lower()][kind] = [new_offset.x,
                                       new_offset.y,
                                       new_offset.z]
self.update_config(instrument_offset=inst_offs)
pip.update_instrument_offset(new_offset)
robot_configs.save_robot_settings(self._config)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(pipette_model: str, pipette_id: str = None) -> pipette_config: """ Load pipette config data This function loads from a combination of - the pipetteModelSpecs.json file in the wheel (should never be edited) - the pipetteNameSpecs.json file in the wheel(should never be edited) - any config overrides found in ``opentrons.config.CONFIG['pipette_config_overrides_dir']`` This function reads from disk each time, so changes to the overrides will be picked up in subsequent calls. :param str pipette_model: The pipette model name (i.e. "p10_single_v1.3") for which to load configuration :param pipette_id: An (optional) unique ID for the pipette to locate config overrides. If the ID is not specified, the system assumes this is a simulated pipette and does not save settings. If the ID is specified but no overrides corresponding to the ID are found, the system creates a new overrides file for it. :type pipette_id: str or None :raises KeyError: if ``pipette_model`` is not in the top-level keys of pipetteModeLSpecs.json (and therefore not in :py:attr:`configs`) :returns pipette_config: The configuration, loaded and checked """ |
# Start from the model config, then overlay the shared name config
# (keyed by the model's 'name' field).
cfg = copy.deepcopy(configs[pipette_model])
cfg.update(copy.deepcopy(name_config()[cfg['name']]))
# Load overrides if we have a pipette id; a missing overrides file
# means this pipette has not been seen before, so create its file now.
if pipette_id:
    try:
        override = load_overrides(pipette_id)
    except FileNotFoundError:
        save_overrides(pipette_id, {}, pipette_model)
        log.info(
            "Save defaults for pipette model {} and id {}".format(
                pipette_model, pipette_id))
    else:
        cfg.update(override)
# the ulPerMm functions are structured in pipetteModelSpecs.json as
# a list sorted from oldest to newest. That means the latest functions
# are always the last element and, as of right now, the older ones are
# the first element (for models that only have one function, the first
# and last elements are the same, which is fine). If we add more in
# the future, we'll have to change this code to select items more
# intelligently.
if ff.use_old_aspiration_functions():
    log.info("Using old aspiration functions")
    ul_per_mm = cfg['ulPerMm'][0]
else:
    log.info("Using new aspiration functions")
    ul_per_mm = cfg['ulPerMm'][-1]
# Assemble the typed config field by field via ensure_value
# (presumably validates presence of each mutable key — confirm in
# ensure_value's definition).
res = pipette_config(
    top=ensure_value(
        cfg, 'top', mutable_configs),
    bottom=ensure_value(
        cfg, 'bottom', mutable_configs),
    blow_out=ensure_value(
        cfg, 'blowout', mutable_configs),
    drop_tip=ensure_value(
        cfg, 'dropTip', mutable_configs),
    pick_up_current=ensure_value(cfg, 'pickUpCurrent', mutable_configs),
    pick_up_distance=ensure_value(cfg, 'pickUpDistance', mutable_configs),
    pick_up_increment=ensure_value(
        cfg, 'pickUpIncrement', mutable_configs),
    pick_up_presses=ensure_value(cfg, 'pickUpPresses', mutable_configs),
    pick_up_speed=ensure_value(cfg, 'pickUpSpeed', mutable_configs),
    aspirate_flow_rate=ensure_value(
        cfg, 'defaultAspirateFlowRate', mutable_configs),
    dispense_flow_rate=ensure_value(
        cfg, 'defaultDispenseFlowRate', mutable_configs),
    channels=ensure_value(cfg, 'channels', mutable_configs),
    model_offset=ensure_value(cfg, 'modelOffset', mutable_configs),
    plunger_current=ensure_value(cfg, 'plungerCurrent', mutable_configs),
    drop_tip_current=ensure_value(cfg, 'dropTipCurrent', mutable_configs),
    drop_tip_speed=ensure_value(cfg, 'dropTipSpeed', mutable_configs),
    min_volume=ensure_value(cfg, 'minVolume', mutable_configs),
    max_volume=ensure_value(cfg, 'maxVolume', mutable_configs),
    ul_per_mm=ul_per_mm,
    quirks=ensure_value(cfg, 'quirks', mutable_configs),
    tip_length=ensure_value(cfg, 'tipLength', mutable_configs),
    display_name=ensure_value(cfg, 'displayName', mutable_configs)
)
return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def known_pipettes() -> Sequence[str]: """ List pipette IDs for which we have known overrides """ |
return [fi.stem
for fi in CONFIG['pipette_config_overrides_dir'].iterdir()
if fi.is_file() and '.json' in fi.suffixes] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_config_dict(pipette_id: str) -> Dict: """ Give updated config with overrides for a pipette. This will add the default value for a mutable config before returning the modified config value. """ |
override = load_overrides(pipette_id)
model = override['model']
config = copy.deepcopy(model_config()['config'][model])
config.update(copy.deepcopy(name_config()[config['name']]))
for top_level_key in config.keys():
add_default(config[top_level_key])
config.update(override)
return config |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list_mutable_configs(pipette_id: str) -> Dict[str, Any]: """ Returns dict of mutable configs only. """ |
cfg: Dict[str, Any] = {}
if pipette_id in known_pipettes():
config = load_config_dict(pipette_id)
else:
log.info('Pipette id {} not found'.format(pipette_id))
return cfg
for key in config:
if key in mutable_configs:
cfg[key] = config[key]
return cfg |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def probe_plate(self) -> str:
    '''
    Probes for the deck plate and calculates the plate distance
    from home.

    To be used for calibrating MagDeck.

    :returns: '' on success, or the stringified error on failure
              (errors are reported as strings, not raised).
    '''
    # Block here while the device is paused.
    self.run_flag.wait()
    try:
        self._send_command(GCODES['PROBE_PLATE'])
    except (MagDeckError, SerialException, SerialNoResponse) as e:
        return str(e)
    return ''
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def move(self, position_mm) -> str:
    '''
    Move the magnets along Z axis where the home position is 0.0;
    position_mm -> a point along Z. Does not self-check if the
    position is outside of the deck's linear range.

    :returns: '' on success, or the stringified error on failure
              (errors are reported as strings, not raised).
    '''
    # Block here while the device is paused.
    self.run_flag.wait()
    try:
        # Round to the precision the serial protocol supports before
        # formatting the G-code move command.
        position_mm = round(float(position_mm), GCODE_ROUNDING_PRECISION)
        self._send_command('{0} Z{1}'.format(GCODES['MOVE'], position_mm))
    except (MagDeckError, SerialException, SerialNoResponse) as e:
        return str(e)
    return ''
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_calibration(labware: Labware, delta: Point):
""" Function to be used whenever an updated delta is found for the first well of a given labware. If an offset file does not exist, create the file using labware id as the filename. If the file does exist, load it and modify the delta and the lastModified fields under the "default" key. """ |
# Make sure the calibration directory exists before writing to it.
offsets_dir = CONFIG['labware_calibration_offsets_dir_v4']
if not offsets_dir.exists():
    offsets_dir.mkdir(parents=True, exist_ok=True)
# One JSON file per labware, named by the labware's id.
offset_file = offsets_dir / '{}.json'.format(labware._id)
payload = _helper_offset_data_format(str(offset_file), delta)
with offset_file.open('w') as f:
    json.dump(payload, f)
# Keep the live labware object in sync with what was persisted.
labware.set_calibration(delta)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_tip_length(labware: Labware, length: float):
""" Function to be used whenever an updated tip length is found for of a given tip rack. If an offset file does not exist, create the file using labware id as the filename. If the file does exist, load it and modify the length and the lastModified fields under the "tipLength" key. """ |
# Make sure the calibration directory exists before writing to it.
offsets_dir = CONFIG['labware_calibration_offsets_dir_v4']
if not offsets_dir.exists():
    offsets_dir.mkdir(parents=True, exist_ok=True)
# One JSON file per labware, named by the labware's id.
offset_file = offsets_dir / '{}.json'.format(labware._id)
payload = _helper_tip_length_data_format(str(offset_file), length)
with offset_file.open('w') as f:
    json.dump(payload, f)
# Keep the live labware object in sync with what was persisted.
labware.tip_length = length
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_calibration(labware: Labware):
""" Look up a calibration if it exists and apply it to the given labware. """ |
calibration_path = CONFIG['labware_calibration_offsets_dir_v4']
labware_offset_path = calibration_path/'{}.json'.format(labware._id)
if labware_offset_path.exists():
calibration_data = _read_file(str(labware_offset_path))
offset_array = calibration_data['default']['offset']
offset = Point(x=offset_array[0], y=offset_array[1], z=offset_array[2])
labware.set_calibration(offset)
if 'tipLength' in calibration_data.keys():
tip_length = calibration_data['tipLength']['length']
labware.tip_length = tip_length |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_from_definition( definition: dict, parent: Location, label: str = None) -> Labware: """ Return a labware object constructed from a provided labware definition dict :param definition: A dict representing all required data for a labware, including metadata such as the display name of the labware, a definition of the order to iterate over wells, the shape of wells (shape, physical dimensions, etc), and so on. The correct shape of this definition is governed by the "labware-designer" project in the Opentrons/opentrons repo. :param parent: A :py:class:`.Location` representing the location where the front and left most point of the outside of labware is (often the front-left corner of a slot on the deck). :param str label: An optional label that will override the labware's display name from its definition """ |
labware = Labware(definition, parent, label)
load_calibration(labware)
return labware |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clear_calibrations():
""" Delete all calibration files for labware. This includes deleting tip-length data for tipracks. """ |
# A missing calibration directory simply means there is nothing to
# delete, so FileNotFoundError is swallowed deliberately.
try:
    json_files = [
        entry
        for entry in CONFIG['labware_calibration_offsets_dir_v4'].iterdir()
        if entry.suffix == '.json']
    for json_file in json_files:
        json_file.unlink()
except FileNotFoundError:
    pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def quirks_from_any_parent( loc: Union[Labware, Well, str, ModuleGeometry, None]) -> List[str]: """ Walk the tree of wells and labwares and extract quirks """ |
def recursive_get_quirks(obj, found):
if isinstance(obj, Labware):
return found + obj.quirks
elif isinstance(obj, Well):
return recursive_get_quirks(obj.parent, found)
else:
return found
return recursive_get_quirks(loc, []) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _build_wells(self) -> List[Well]: """ This function is used to create one instance of wells to be used by all accessor functions. It is only called again if a new offset needs to be applied. """ |
# One Well per entry in the ordering list; every well shares the
# labware's calibrated offset (via a Location parented to self), so
# rebuilding after a calibration change repositions all wells.
return [
    Well(
        self._well_definition[well],
        Location(self._calibrated_offset, self),
        "{} of {}".format(well, self._display_name),
        self.is_tiprack)
    for well in self._ordering]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_indexed_dictionary(self, group=0):
""" Creates a dict of lists of Wells. Which way the labware is segmented determines whether this is a dict of rows or dict of columns. If group is 1, then it will collect wells that have the same alphabetic prefix and therefore are considered to be in the same row. If group is 2, it will collect wells that have the same numeric postfix and therefore are considered to be in the same column. """ |
dict_list = defaultdict(list)
for index, well_obj in zip(self._ordering, self._wells):
dict_list[self._pattern.match(index).group(group)].append(well_obj)
return dict_list |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_calibration(self, delta: Point):
""" Called by save calibration in order to update the offset on the object. """ |
# Shift the definition-derived offset by the measured delta, then
# rebuild the wells so they pick up the new calibrated position.
base, shift = self._offset, delta
self._calibrated_offset = Point(x=base.x + shift.x,
                                y=base.y + shift.y,
                                z=base.z + shift.z)
self._wells = self._build_wells()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wells_by_index(self) -> Dict[str, Well]: """ Accessor function used to create a look-up table of Wells by name. With indexing one can treat it as a typical python dictionary whose keys are well names. To access well A1, for example, simply write: labware.wells_by_index()['A1'] :return: Dictionary of well objects keyed by well name """ |
return {well: wellObj
for well, wellObj in zip(self._ordering, self._wells)} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rows(self, *args) -> List[List[Well]]: """ Accessor function used to navigate through a labware by row. With indexing one can treat it as a typical python nested list. To access row A for example, simply write: labware.rows()[0]. Note that this method takes args for backward-compatibility, but use of args is deprecated and will be removed in future versions. Args can be either strings or integers, but must all be the same type (e.g.: `self.rows(1, 4, 8)` or `self.rows('A', 'B')`, but `self.rows('A', 4)` is invalid. :return: A list of row lists """ |
row_dict = self._create_indexed_dictionary(group=1)
keys = sorted(row_dict)
if not args:
res = [row_dict[key] for key in keys]
elif isinstance(args[0], int):
res = [row_dict[keys[idx]] for idx in args]
elif isinstance(args[0], str):
res = [row_dict[idx] for idx in args]
else:
raise TypeError
return res |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rows_by_index(self) -> Dict[str, List[Well]]: """ Accessor function used to navigate through a labware by row name. With indexing one can treat it as a typical python dictionary. To access row A for example, simply write: labware.rows_by_index()['A'] :return: Dictionary of Well lists keyed by row name """ |
row_dict = self._create_indexed_dictionary(group=1)
return row_dict |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def columns(self, *args) -> List[List[Well]]: """ Accessor function used to navigate through a labware by column. With indexing one can treat it as a typical python nested list. To access row A for example, simply write: labware.columns()[0] Note that this method takes args for backward-compatibility, but use of args is deprecated and will be removed in future versions. Args can be either strings or integers, but must all be the same type (e.g.: `self.columns(1, 4, 8)` or `self.columns('1', '2')`, but `self.columns('1', 4)` is invalid. :return: A list of column lists """ |
col_dict = self._create_indexed_dictionary(group=2)
keys = sorted(col_dict, key=lambda x: int(x))
if not args:
res = [col_dict[key] for key in keys]
elif isinstance(args[0], int):
res = [col_dict[keys[idx]] for idx in args]
elif isinstance(args[0], str):
res = [col_dict[idx] for idx in args]
else:
raise TypeError
return res |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def columns_by_index(self) -> Dict[str, List[Well]]: """ Accessor function used to navigate through a labware by column name. With indexing one can treat it as a typical python dictionary. To access row A for example, simply write: labware.columns_by_index()['1'] :return: Dictionary of Well lists keyed by column name """ |
col_dict = self._create_indexed_dictionary(group=2)
return col_dict |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def next_tip(self, num_tips: int = 1) -> Optional[Well]: """ Find the next valid well for pick-up. Determines the next valid start tip from which to retrieve the specified number of tips. There must be at least `num_tips` sequential wells for which all wells have tips, in the same column. :param num_tips: target number of sequential tips in the same column :type num_tips: int :return: the :py:class:`.Well` meeting the target criteria, or None """ |
assert num_tips > 0
columns: List[List[Well]] = self.columns()
drop_leading_empties = [
list(dropwhile(lambda x: not x.has_tip, column))
for column in columns]
drop_at_first_gap = [
list(takewhile(lambda x: x.has_tip, column))
for column in drop_leading_empties]
long_enough = [
column for column in drop_at_first_gap if len(column) >= num_tips]
try:
first_long_enough = long_enough[0]
result: Optional[Well] = first_long_enough[0]
except IndexError:
result = None
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def use_tips(self, start_well: Well, num_channels: int = 1):
""" Removes tips from the tip tracker. This method should be called when a tip is picked up. Generally, it will be called with `num_channels=1` or `num_channels=8` for single- and multi-channel respectively. If picking up with more than one channel, this method will automatically determine which tips are used based on the start well, the number of channels, and the geometry of the tiprack. :param start_well: The :py:class:`.Well` from which to pick up a tip. For a single-channel pipette, this is the well to send the pipette to. For a multi-channel pipette, this is the well to send the back-most nozzle of the pipette to. :type start_well: :py:class:`.Well` :param num_channels: The number of channels for the current pipette :type num_channels: int """ |
assert num_channels > 0, 'Bad call to use_tips: num_channels==0'
# Locate the column holding the starting well (IndexError if absent).
column = [col for col in self.columns() if start_well in col][0]
start_idx = column.index(start_well)
# Take one well per channel, but never run past the bottom of the
# column (e.g. starting 2 wells from the bottom yields at most 2).
count = min(len(column) - start_idx, num_channels)
taken = column[start_idx:start_idx + count]
assert all(well.has_tip for well in taken), \
    '{} is out of tips'.format(str(self))
for well in taken:
    well.has_tip = False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def require_linklocal(handler):
""" Ensure the decorated is only called if the request is linklocal. The host ip address should be in the X-Host-IP header (provided by nginx) """ |
@functools.wraps(handler)
async def decorated(request: web.Request) -> web.Response:
    # nginx forwards the original host IP in this header; without it
    # we cannot establish that the request is link-local.
    ipaddr_str = request.headers.get('x-host-ip')
    invalid_req_data = {
        'error': 'bad-interface',
        'message': f'The endpoint {request.url} can only be used from '
        'local connections'
    }
    if not ipaddr_str:
        return web.json_response(
            data=invalid_req_data,
            status=403)
    try:
        addr = ipaddress.ip_address(ipaddr_str)
    except ValueError:
        # A malformed header is a server misconfiguration: log it and
        # propagate rather than masking it as a 403.
        LOG.exception(f"Couldn't parse host ip address {ipaddr_str}")
        raise
    if not addr.is_link_local:
        return web.json_response(data=invalid_req_data, status=403)
    return await handler(request)
return decorated
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def authorized_keys(mode='r'):
""" Open the authorized_keys file. Separate function for mocking. :param mode: As :py:meth:`open` """ |
path = '/var/home/.ssh/authorized_keys'
if not os.path.exists(path):
os.makedirs(os.path.dirname(path))
open(path, 'w').close()
with open(path, mode) as ak:
yield ak |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_by_hash(hashval: str):
""" Remove the key whose md5 sum matches hashval. :raises: KeyError if the hashval wasn't found """ |
# Snapshot the current keys, then rewrite the file keeping every key
# whose hash does NOT match. The original loop broke out after writing
# the first surviving key (dropping all later keys) and its for-else
# raised KeyError unless *every* key matched — the inverse of the
# documented contract.
key_details = get_keys()
found = False
with authorized_keys('w') as ak:
    for keyhash, key in key_details:
        if keyhash == hashval:
            found = True
        else:
            ak.write(f'{key}\n')
if not found:
    raise KeyError(hashval)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def list_keys(request: web.Request) -> web.Response: """ List keys in the authorized_keys file. GET /server/ssh_keys -> 200 OK {"public_keys": [{"key_md5": md5 hex digest, "key": key string}]} (or 403 if not from the link-local connection) """ |
return web.json_response(
{'public_keys': [{'key_md5': details[0], 'key': details[1]}
for details in get_keys()]},
status=200) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def add(request: web.Request) -> web.Response: """ Add a public key to the authorized_keys file. POST /server/ssh_keys {"key": key string} -> 201 Created If the key string doesn't look like an openssh public key, rejects with 400 """ |
body = await request.json()
if 'key' not in body or not isinstance(body['key'], str):
return web.json_response(
data={'error': 'no-key', 'message': 'No "key" element in body'},
status=400)
pubkey = body['key']
# Do some fairly minor sanitization; dropbear will ignore invalid keys but
# we still don’t want to have a bunch of invalid data in there
alg = pubkey.split()[0]
# We don’t allow dss so this has to be rsa or ecdsa and shouldn’t start
# with restrictions
if alg != 'ssh-rsa' and not alg.startswith('ecdsa'):
LOG.warning(f"weird keyfile uploaded: starts with {alg}")
return web.json_response(
data={'error': 'bad-key',
'message': f'Key starts with invalid algorithm {alg}'},
status=400)
if '\n' in pubkey[:-1]:
LOG.warning(f"Newlines in keyfile that shouldn't be there")
return web.json_response(
data={'error': 'bad-key', 'message': f'Key has a newline'},
status=400)
if '\n' == pubkey[-1]:
pubkey = pubkey[:-1]
# This is a more or less correct key we can write
hashval = hashlib.new('md5', pubkey.encode()).hexdigest()
if not key_present(hashval):
with authorized_keys('a') as ak:
ak.write(f'{pubkey}\n')
return web.json_response(
data={'message': 'Added key {hashval}',
'key_md5': hashval},
status=201) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def remove(request: web.Request) -> web.Response: """ Remove a public key from authorized_keys DELETE /server/ssh_keys/:key_md5_hexdigest -> 200 OK if the key was found -> 404 Not Found otherwise """ |
requested_hash = request.match_info['key_md5']
new_keys: List[str] = []
found = False
for keyhash, key in get_keys():
if keyhash == requested_hash:
found = True
else:
new_keys.append(key)
if not found:
return web.json_response(
data={'error': 'invalid-key-hash',
'message': f'No such key md5 {requested_hash}'},
status=404)
with authorized_keys('w') as ak:
ak.write('\n'.join(new_keys) + '\n')
return web.json_response(
data={
'message': f'Key {requested_hash} deleted. '
'Restart robot to take effect',
'restart_url': '/server/restart'},
status=200) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log_init():
""" Function that sets log levels and format strings. Checks for the OT_API_LOG_LEVEL environment variable otherwise defaults to DEBUG. """ |
# Fall back to INFO when the configured name isn't a level python's
# logging module recognizes.
fallback_log_level = 'INFO'
ot_log_level = hardware.config.log_level
if ot_log_level not in logging._nameToLevel:
    log.info("OT Log Level {} not found. Defaulting to {}".format(
        ot_log_level, fallback_log_level))
    ot_log_level = fallback_log_level
level_value = logging._nameToLevel[ot_log_level]
serial_log_filename = CONFIG['serial_log_file']
api_log_filename = CONFIG['api_log_file']
logging_config = dict(
    version=1,
    formatters={
        'basic': {
            'format':
            '%(asctime)s %(name)s %(levelname)s [Line %(lineno)s] %(message)s'  # noqa: E501
        }
    },
    handlers={
        # Console output at the configured level.
        'debug': {
            'class': 'logging.StreamHandler',
            'formatter': 'basic',
            'level': level_value
        },
        # Serial traffic is verbose: rotate at 5 MB, keep 3 backups.
        'serial': {
            'class': 'logging.handlers.RotatingFileHandler',
            'formatter': 'basic',
            'filename': serial_log_filename,
            'maxBytes': 5000000,
            'level': logging.DEBUG,
            'backupCount': 3
        },
        # Main API log: rotate at 1 MB, keep 5 backups.
        'api': {
            'class': 'logging.handlers.RotatingFileHandler',
            'formatter': 'basic',
            'filename': api_log_filename,
            'maxBytes': 1000000,
            'level': logging.DEBUG,
            'backupCount': 5
        }
    },
    # Driver-level serial loggers go only to the serial file; most
    # other opentrons loggers go to both the console and the API log.
    loggers={
        '__main__': {
            'handlers': ['debug', 'api'],
            'level': logging.INFO
        },
        'opentrons.server': {
            'handlers': ['debug', 'api'],
            'level': level_value
        },
        'opentrons.api': {
            'handlers': ['debug', 'api'],
            'level': level_value
        },
        'opentrons.instruments': {
            'handlers': ['debug', 'api'],
            'level': level_value
        },
        'opentrons.config': {
            'handlers': ['debug', 'api'],
            'level': level_value
        },
        'opentrons.drivers.smoothie_drivers.driver_3_0': {
            'handlers': ['debug', 'api'],
            'level': level_value
        },
        'opentrons.drivers.serial_communication': {
            'handlers': ['serial'],
            'level': logging.DEBUG
        },
        'opentrons.drivers.thermocycler.driver': {
            'handlers': ['serial'],
            'level': logging.DEBUG
        },
        'opentrons.protocol_api': {
            'handlers': ['api', 'debug'],
            'level': level_value
        },
        'opentrons.hardware_control': {
            'handlers': ['api', 'debug'],
            'level': level_value
        },
        'opentrons.legacy_api.containers': {
            'handlers': ['api'],
            'level': level_value
        }
    }
)
dictConfig(logging_config)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
""" The main entrypoint for the Opentrons robot API server stack. This function - creates and starts the server for both the RPC routes handled by :py:mod:`opentrons.server.rpc` and the HTTP routes handled by :py:mod:`opentrons.server.http` - initializes the hardware interaction handled by either :py:mod:`opentrons.legacy_api` or :py:mod:`opentrons.hardware_control` This function does not return until the server is brought down. """ |
# Combine the shared argument definitions with this entrypoint's own
# parser, then hand every parsed option straight through to run().
parser = ArgumentParser(
    description="Opentrons robot software",
    parents=[build_arg_parser()])
opts = parser.parse_args()
run(**vars(opts))
# run() blocks until server shutdown; exit via the parser so the
# message and status code are emitted consistently.
parser.exit(message="Stopped\n")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_rules_file():
""" Copy the udev rules file for Opentrons Modules to opentrons_data directory and trigger the new rules. This rules file in opentrons_data is symlinked into udev rules directory TODO: Move this file to resources and move the symlink to point to /data/system/ """ |
# Local imports: this function only runs on the robot itself, so keep
# these out of module import time.
import shutil
import subprocess
# Rules file ships inside the package, relative to this module.
rules_file = os.path.join(
    os.path.abspath(os.path.dirname(__file__)), '..',
    'config', 'modules', '95-opentrons-modules.rules')
shutil.copy2(
    rules_file,
    '/data/user_storage/opentrons_data/95-opentrons-modules.rules')
# Reload udev rules and re-trigger so they take effect without a
# reboot; any stdout from udevadm is surfaced as a warning.
res0 = subprocess.run('udevadm control --reload-rules',
                      shell=True, stdout=subprocess.PIPE).stdout.decode()
if res0:
    log.warning(res0.strip())
res1 = subprocess.run('udevadm trigger',
                      shell=True, stdout=subprocess.PIPE).stdout.decode()
if res1:
    log.warning(res1.strip())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def restart(request: web.Request) -> web.Response: """ Restart the robot. Blocks while the restart lock is held. """ |
# Take the restart lock so an in-flight update can't race the reboot.
async with request.app[RESTART_LOCK_NAME]:
    # Delay the actual restart by 1s so this response can flush.
    asyncio.get_event_loop().call_later(1, _do_restart)
# NOTE(review): original indentation of this return relative to the
# async-with block is ambiguous in the dump; placed outside — confirm.
return web.json_response({'message': 'Restarting in 1s'},
                         status=200)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def create_virtual_environment(loop=None):
    """ Create a virtual environment for a test install of a package.

    :return: a tuple of (path to the virtualenv directory, path to the
        virtualenv's python executable, path to its site-packages
        directory)
    """
    scratch_dir = tempfile.mkdtemp()
    venv_dir = os.path.join(scratch_dir, VENV_NAME)
    venv_proc = await asyncio.create_subprocess_shell(
        'virtualenv {}'.format(venv_dir), loop=loop)
    await venv_proc.communicate()
    # The interpreter lives in a platform-specific subdirectory
    on_windows = sys.platform == 'win32'
    bin_dir = 'Scripts' if on_windows else 'bin'
    exe_name = 'python.exe' if on_windows else 'python'
    python = os.path.join(venv_dir, bin_dir, exe_name)
    venv_site_pkgs = install_dependencies(python)
    log.info("Created virtual environment at {}".format(venv_dir))
    return venv_dir, python, venv_site_pkgs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def install_dependencies(python) -> str:
    """ Copy aiohttp and virtualenv install locations (and their transitive
    dependencies) into the new virtualenv so that the update server can
    install without access to full system site-packages or connection to
    the internet.

    Full access to system site-packages causes the install inside the
    virtualenv to fail quietly because it does not have permission to
    overwrite a package by the same name and then it picks up the system
    version of otupdate. Also, we have to do a copy rather than a symlink
    because a non-admin Windows account does not have permissions to create
    symlinks.

    :param python: path to the virtualenv's python executable (the venv
        directory name ``VENV_NAME`` must appear in this path)
    :return: the virtualenv's site-packages directory
    """
    # Import all of the packages that need to be available in the virtualenv
    # for the update server to boot, so we can locate them using their
    # __file__ attribute
    import aiohttp
    import virtualenv_support
    import async_timeout
    import chardet
    import multidict
    import yarl
    import idna
    import pip
    import setuptools
    import virtualenv
    # Determine where the site-packages directory exists in the virtualenv
    # by asking the venv's own interpreter for its sys.path and picking the
    # site-packages entry that lives under the venv's temp dir
    tmpdirname = python.split(VENV_NAME)[0]
    paths_raw = sp.check_output(
        '{} -c "import sys; [print(p) for p in sys.path]"'.format(python),
        shell=True)
    paths = paths_raw.decode().split()
    venv_site_pkgs = list(
        filter(
            lambda x: tmpdirname in x and 'site-packages' in x, paths))[-1]
    dependencies = [
        ('aiohttp', aiohttp),
        ('virtualenv_support', virtualenv_support),
        ('async_timeout', async_timeout),
        ('chardet', chardet),
        ('multidict', multidict),
        ('yarl', yarl),
        ('idna', idna),
        ('pip', pip),
        ('setuptools', setuptools),
        ('virtualenv.py', virtualenv)]
    # Copy each dependency from its system-install location to the
    # site-packages directory of the virtualenv; single-module deps
    # (names ending in .py) are copied as files, packages as trees
    for dep_name, dep in dependencies:
        src_dir = os.path.abspath(os.path.dirname(dep.__file__))
        dst = os.path.join(venv_site_pkgs, dep_name)
        if os.path.exists(dst):
            log.debug('{} already exists--skipping'.format(dst))
        else:
            log.debug('Copying {} to {}'.format(dep_name, dst))
            if dep_name.endswith('.py'):
                shutil.copy2(os.path.join(src_dir, dep_name), dst)
            else:
                shutil.copytree(src_dir, dst)
    return venv_site_pkgs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def _start_server(python, port, venv_site_pkgs=None, cwd=None) -> sp.Popen:
    """ Start an update server sandboxed in the virtual env.

    Polls the server's health endpoint (with retries) to determine when it
    becomes available. If the retries are exhausted, the returned process
    will already have been terminated.

    :return: the server process
    """
    log.info("Starting sandboxed update server on port {}".format(port))
    interpreter = python
    if venv_site_pkgs:
        # Restrict the child to the venv's site-packages
        interpreter = 'PYTHONPATH={} {}'.format(venv_site_pkgs, python)
    command = ' '.join([interpreter, '-m', 'otupdate',
                        '--debug', '--test', '--port', str(port)])
    log.debug('cmd: {}'.format(command))
    server = sp.Popen(command, shell=True, cwd=cwd)
    # Make sure the child is reaped even if the caller never stops it
    atexit.register(lambda: _stop_server(server))
    retries = 3
    async with aiohttp.ClientSession() as session:
        status, detail = await selftest.health_check(
            session=session, port=port, retries=retries)
    if status == 'failure':
        log.debug(
            "Test server failed to start after {} retries. Stopping.".format(
                retries))
        _stop_server(server)
    return server
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def install_update(filename, loop):
    """ Install the update into the system environment.

    :return: a tuple of (dict with 'message' and 'filename' keys, the
        installer's return code). On success the message is the installer's
        stdout; on failure it is its stderr.
    """
    log.info("Installing update server into system environment")
    log.debug('File {} exists? {}'.format(filename, os.path.exists(filename)))
    out, err, returncode = await _install(sys.executable, filename, loop)
    message = out if returncode == 0 else err
    return {'message': message, 'filename': filename}, returncode
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def solve(expected: List[Tuple[float, float]],
          actual: List[Tuple[float, float]]) -> np.ndarray:
    """ Compute the 2-D transform relating two sets of three x-y points.

    Both inputs are lists of three (x, y) points. Each list is turned into
    a 3x3 homogeneous matrix whose columns are the points with a 1
    appended::

        [ x1 x2 x3 ]
        [ y1 y2 y3 ]
        [  1  1  1 ]

    The returned 3x3 matrix T satisfies ``T @ E == A`` where E and A are
    the homogeneous matrices built from ``expected`` and ``actual``.

    Example: if ``expected`` is [(1, 1), (2, 2), (1, 2)] and ``actual`` is
    [(1.1, 1.1), (2.1, 2.1), (1.1, 2.1)] (a shift of exactly +0.1 in both
    x and y), then T is the translation::

        [ 1 0 0.1 ]
        [ 0 1 0.1 ]
        [ 0 0 1   ]

    :return: the transformation matrix T
    """
    # Input list shape validation is handled by the type checker
    def _homogeneous(points):
        # Columns are the points, with a row of ones appended
        return np.array([[x, y, 1] for x, y in points]).transpose()

    # T = A . E^-1. `np.dot` of two square 2-D arrays is a matrix product,
    # so this yields the 3x3 change-of-base matrix.
    return np.dot(_homogeneous(actual), inv(_homogeneous(expected)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_transform(
        t: Union[List[List[float]], np.ndarray],
        pos: Tuple[float, float, float],
        with_offsets=True) -> Tuple[float, float, float]:
    """ Change of base using a transform matrix.

    Primarily used to render a point in space in a way that is more
    readable for the user.

    :param t: A transformation matrix from one 3D space [A] to another [B]
    :param pos: XYZ point in space A
    :param with_offsets: If True, the point is extended with a homogeneous
        coordinate of 1 (an affine transform, so translation applies); if
        False it is extended with 0 (translation is ignored).
    :return: corresponding XYZ point in space B
    """
    homogeneous = list(pos) + [1 if with_offsets else 0]
    transformed = dot(t, homogeneous)
    return tuple(transformed[:3])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply_reverse(
        t: Union[List[List[float]], np.ndarray],
        pos: Tuple[float, float, float],
        with_offsets=True) -> Tuple[float, float, float]:
    """ Like :py:meth:`apply_transform` but inverts the transform first.

    :param t: A transformation matrix from one 3D space [A] to another [B]
    :param pos: XYZ point in space B
    :param with_offsets: Forwarded to :py:meth:`apply_transform`. (Bug fix:
        this argument was previously accepted but silently ignored, so the
        default of True was always used.)
    :return: corresponding XYZ point in space A
    """
    return apply_transform(inv(t), pos, with_offsets)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def _resin_supervisor_restart():
    """ Execute a container restart by requesting it from the supervisor.

    Note that failures here are returned but most likely will not be sent
    back to the caller, since this is run in a separate workthread. If the
    system is not responding, look for these log messages.
    """
    supervisor = os.environ.get('RESIN_SUPERVISOR_ADDRESS',
                                'http://127.0.0.1:48484')
    api_key = os.environ.get('RESIN_SUPERVISOR_API_KEY', 'unknown')
    app_id = os.environ.get('RESIN_APP_ID', 'unknown')
    async with aiohttp.ClientSession() as session:
        async with session.post(supervisor + '/v1/restart',
                                params={'apikey': api_key},
                                json={'appId': app_id,
                                      'force': True}) as resp:
            body = await resp.read()
            # The supervisor acknowledges a restart with 202 Accepted
            if resp.status != 202:
                log.error("Could not shut down: {}: {}"
                          .format(resp.status, body))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(self, port: str = None, options: Any = None):
    """ Connect to the robot hardware.

    This function is provided for backwards compatibility; in most cases it
    need not be called, since protocols executed by an OT2 are connected to
    either the hardware or a simulator automatically.

    :param port: The port to connect to the smoothie board, or the magic
        string ``"Virtual Smoothie"`` to initialize and connect to a
        simulator
    :param options: Ignored, kept for backwards compatibility.
    """
    hardware = self._hardware
    hardware.connect(port)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_instr(ctx,
                name: str,
                mount: str,
                *args, **kwargs) -> InstrumentContext:
    """ Build an instrument in a backwards-compatible way.

    You should almost certainly not be calling this function from a
    protocol; if you want to create a pipette on a lower level, use
    :py:meth:`.ProtocolContext.load_instrument` directly. Extra positional
    and keyword arguments are accepted for backwards compatibility and
    ignored.
    """
    target_mount = Mount[mount.upper()]
    return ctx.load_instrument(name, target_mount)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, container_name, slot, label=None, share=False):
    """ Load a piece of labware by specifying its name and position.

    Delegates to :py:meth:`.ProtocolContext.load_labware_by_name`, after
    translating old labware names to new ones where a translation exists.
    Labware with no modern equivalent, module names, and slot sharing all
    raise :py:class:`NotImplementedError`.
    """
    if share:
        raise NotImplementedError("Sharing not supported")
    # Translation wins over every other case; fall through otherwise
    if container_name in self.LW_TRANSLATION:
        name = self.LW_TRANSLATION[container_name]
    elif container_name in self.LW_NO_EQUIVALENT:
        raise NotImplementedError("Labware {} is not supported"
                                  .format(container_name))
    elif container_name in ('magdeck', 'tempdeck'):
        raise NotImplementedError("Module load not yet implemented")
    else:
        name = container_name
    return self._ctx.load_labware_by_name(name, slot, label)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build(cls, builder, *args, build_loop=None, **kwargs):
    """ Build a hardware control API and initialize the adapter in one call

    :param builder: the builder method to use (e.g.
        :py:meth:`hardware_control.API.build_hardware_simulator`)
    :param args: Args to forward to the builder method. Any event loop
        passed positionally is discarded; the adapter's own loop is used.
    :param build_loop: The loop on which to run an async builder; defaults
        to the current event loop.
    :param kwargs: Kwargs to forward to the builder method
    """
    # The adapter owns a fresh event loop, which is always what the
    # builder receives via the `loop` kwarg.
    adapter_loop = asyncio.new_event_loop()
    kwargs['loop'] = adapter_loop
    filtered_args = tuple(
        a for a in args if not isinstance(a, asyncio.AbstractEventLoop))
    if asyncio.iscoroutinefunction(builder):
        runner = build_loop or asyncio.get_event_loop()
        api = runner.run_until_complete(builder(*filtered_args, **kwargs))
    else:
        api = builder(*filtered_args, **kwargs)
    return cls(api, adapter_loop)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(self, port: str = None, force: bool = False):
    """ Connect to hardware.

    :param port: The port to connect to. May be `None`, in which case the
        hardware will connect to the first serial port it sees with the
        device name `FT232R`; or a port name compatible with
        `serial.Serial`.
    :param force: If `True`, connect even if a lockfile is established.
        This should only ever be specified as `True` by the main software
        starting.
    """
    previous = object.__getattribute__(self, '_api')
    loop = previous._loop
    # Build a real hardware controller on the old API's loop, reusing a
    # copy of its config, then prime the instrument cache before swapping
    fresh = loop.run_until_complete(API.build_hardware_controller(
        loop=loop,
        port=port,
        config=copy.copy(previous.config),
        force=force))
    loop.run_until_complete(fresh.cache_instruments())
    setattr(self, '_api', fresh)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def disconnect(self):
    """ Disconnect from connected hardware.

    Replaces the live controller with a simulator that shares the same
    loop and a copy of the same config.
    """
    current = object.__getattribute__(self, '_api')
    simulator = API.build_hardware_simulator(
        loop=current._loop,
        config=copy.copy(current.config))
    setattr(self, '_api', simulator)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_attached_pipettes(self):
    """ Mimic the behavior of robot.get_attached_pipettes"""
    api = object.__getattribute__(self, '_api')
    pipettes = {}
    for mount, info in api.attached_instruments.items():
        entry = {
            'model': info.get('name', None),
            'id': info.get('pipette_id', None),
            'mount_axis': Axis.by_mount(mount),
            'plunger_axis': Axis.of_plunger(mount)
        }
        # tip_length is only meaningful when a pipette is present
        if info.get('name'):
            entry['tip_length'] = info.get('tip_length', None)
        pipettes[mount.name.lower()] = entry
    return pipettes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unzip_update(filepath: str,
                 progress_callback: Callable[[float], None],
                 acceptable_files: Sequence[str],
                 mandatory_files: Sequence[str],
                 chunk_size: int = 1024) -> Tuple[Mapping[str, Optional[str]],
                                                  Mapping[str, int]]:
    """ Unzip an update file.

    The files in ``acceptable_files`` are unzipped (discarding their leading
    directories) into the same directory as the zipfile; everything else in
    the archive is ignored. This function is blocking and takes a while. It
    calls ``progress_callback`` with a number between 0 and 1 indicating
    overall archive unzip progress (it may not reach 1.0 exactly).

    :param filepath: The path of the zipfile to unzip. The contents will be
        placed in its directory
    :param progress_callback: A callable taking a number between 0 and 1,
        called periodically with progress. For user display only.
    :param acceptable_files: A list of files to unzip if found. Others will
        be ignored.
    :param mandatory_files: A list of files to raise an error about if
        they're not in the zip. Should probably be a subset of
        ``acceptable_files``.
    :param chunk_size: If specified, the size of the chunk to read and
        write. If not specified, will default to 1024
    :return: Two dictionaries, the first mapping file names to paths (None
        for acceptable files not found) and the second mapping file names
        to sizes
    :raises FileMissing: If a mandatory file is missing
    """
    assert chunk_size
    total_size = 0
    written_size = 0
    to_unzip: List[zipfile.ZipInfo] = []
    file_paths: Dict[str, Optional[str]] = {fn: None
                                            for fn in acceptable_files}
    file_sizes: Dict[str, int] = {fn: 0 for fn in acceptable_files}
    LOG.info(f"Unzipping {filepath}")
    with zipfile.ZipFile(filepath, 'r') as zf:
        files = zf.infolist()
        # First pass: find the acceptable members and their total size so
        # progress can be reported as a fraction of all bytes to write
        remaining_filenames = [fn for fn in acceptable_files]
        for fi in files:
            if fi.filename in acceptable_files:
                to_unzip.append(fi)
                total_size += fi.file_size
                remaining_filenames.remove(fi.filename)
                LOG.debug(f"Found {fi.filename} ({fi.file_size}B)")
            else:
                LOG.debug(f"Ignoring {fi.filename}")
        # Anything mandatory left unmatched is a hard error
        for name in remaining_filenames:
            if name in mandatory_files:
                raise FileMissing(f'File {name} missing from zip')
        for fi in to_unzip:
            uncomp_path = os.path.join(os.path.dirname(filepath), fi.filename)
            with zf.open(fi) as zipped, open(uncomp_path, 'wb') as unzipped:
                LOG.debug(f"Beginning unzip of {fi.filename} to {uncomp_path}")
                while True:
                    chunk = zipped.read(chunk_size)
                    unzipped.write(chunk)
                    written_size += len(chunk)
                    progress_callback(written_size/total_size)
                    # A short (or empty) read means this member is done
                    if len(chunk) != chunk_size:
                        break
            file_paths[fi.filename] = uncomp_path
            file_sizes[fi.filename] = fi.file_size
            LOG.debug(f"Unzipped {fi.filename} to {uncomp_path}")
    LOG.info(
        f"Unzipped {filepath}, results: \n\t" + '\n\t'.join(
            [f'{k}: {file_paths[k]} ({file_sizes[k]}B)'
             for k in file_paths.keys()]))
    return file_paths, file_sizes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hash_file(path: str,
              progress_callback: Callable[[float], None],
              chunk_size: int = 1024,
              file_size: int = None,
              algo: str = 'sha256') -> bytes:
    """ Hash a file and return the hash, providing progress callbacks.

    :param path: The file to hash
    :param progress_callback: The callback to call with progress between 0
        and 1. May not ever be precisely 1.0.
    :param chunk_size: The size of the chunks to hash in one call; defaults
        to 1024 (also used if a falsy value is passed)
    :param file_size: The size of the file to hash, used for progress
        callbacks. Calculated internally if not specified.
    :param algo: The algorithm to use. Can be anything used by
        :py:mod:`hashlib`
    :returns: The digest as ascii hex
    """
    digester = hashlib.new(algo)
    processed = 0
    if not chunk_size:
        chunk_size = 1024
    with open(path, 'rb') as source:
        if not file_size:
            # Seek to the end to learn the size, then rewind
            file_size = source.seek(0, 2)
        source.seek(0)
        while True:
            block = source.read(chunk_size)
            digester.update(block)
            processed += len(block)
            progress_callback(processed / file_size)
            # A short read means we've consumed the whole file
            if len(block) != chunk_size:
                break
    return binascii.hexlify(digester.digest())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_unused_partition() -> RootPartitions:
    """ Find the currently-unused root partition to write to """
    # `ot-unused-partition` prints the number of the inactive partition
    which = subprocess.check_output(['ot-unused-partition']).strip()
    mapping = {b'2': RootPartitions.TWO, b'3': RootPartitions.THREE}
    return mapping[which]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_file(infile: str,
               outfile: str,
               progress_callback: Callable[[float], None],
               chunk_size: int = 1024,
               file_size: int = None):
    """ Write a file to another file with progress callbacks.

    :param infile: The input filepath
    :param outfile: The output filepath
    :param progress_callback: The callback to call for progress
    :param chunk_size: The size of file chunks to copy in between progress
        notifications
    :param file_size: The total size of the update file (for generating
        progress percentage). If ``None``, generated with ``seek``/``tell``.
    """
    copied = 0
    with open(infile, 'rb') as img, open(outfile, 'wb') as part:
        if file_size is None:
            # Learn the input size from the file itself
            file_size = img.seek(0, 2)
            img.seek(0)
            LOG.info(f'write_file: file size calculated as {file_size}B')
        LOG.info(f'write_file: writing {infile} ({file_size}B)'
                 f' to {outfile} in {chunk_size}B chunks')
        while True:
            data = img.read(chunk_size)
            part.write(data)
            copied += len(data)
            progress_callback(copied / file_size)
            # A short read means the copy is complete
            if len(data) != chunk_size:
                break
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_update(rootfs_filepath: str,
                 progress_callback: Callable[[float], None],
                 chunk_size: int = 1024,
                 file_size: int = None) -> RootPartitions:
    """ Write the new rootfs to the next root partition.

    - Figure out, from the system, the correct root partition to write to
    - Write the rootfs at ``rootfs_filepath`` there, with progress

    :param rootfs_filepath: The path to a checked rootfs.ext4
    :param progress_callback: A callback to call periodically with progress
        between 0 and 1.0. May never reach precisely 1.0, best only for
        user information.
    :param chunk_size: The size of file chunks to copy in between progress
        notifications
    :param file_size: The total size of the update file (for generating
        progress percentage). If ``None``, generated with
        ``seek``/``tell``.
    :returns: The root partition that the rootfs image was written to,
        e.g. ``RootPartitions.TWO`` or ``RootPartitions.THREE``.
    """
    target = _find_unused_partition()
    write_file(rootfs_filepath, target.value.path, progress_callback,
               chunk_size, file_size)
    return target
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _switch_partition() -> RootPartitions:
    """ Switch the active boot partition using the switch script.

    :returns: the partition that is now set to boot
    :raises RuntimeError: if the script's output can't be parsed
    """
    output = subprocess.check_output(['ot-switch-partitions'])
    pattern = re.compile(
        b'Current boot partition: ([23]), setting to ([23])')
    for line in output.split(b'\n'):
        matched = pattern.match(line)
        if matched:
            return {b'2': RootPartitions.TWO,
                    b'3': RootPartitions.THREE}[matched.group(2)]
    # No line of the output matched the expected announcement
    raise RuntimeError(f'Bad output from ot-switch-partitions: {output}')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def commit_update():
    """ Switch the target boot partition.

    :raises RuntimeError: if the partition switched to is not the one that
        was unused beforehand (a sanity check against a bad switch)
    """
    expected = _find_unused_partition()
    switched_to = _switch_partition()
    if switched_to == expected:
        LOG.info(f'commit_update: committed to booting {switched_to}')
    else:
        msg = f"Bad switch: switched to {switched_to} when {expected} was unused"
        LOG.error(msg)
        raise RuntimeError(msg)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_module(self):
    """ Returns the module placeable if present, else None """
    # A placeable can hold at most one module, so the first hit wins
    candidates = (
        self.get_child_by_name(name) for name in SUPPORTED_MODULES)
    return next((module for module in candidates if module), None)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_children_from_slice(self, s):
    """ Retrieves list of children within slice.

    String endpoints (well names) are translated to indices first.
    """
    start, stop = s.start, s.stop
    if isinstance(start, str):
        start = self.get_index_from_name(start)
    if isinstance(stop, str):
        stop = self.get_index_from_name(stop)
    window = slice(start, stop, s.step)
    return WellSeries(self.get_children_list()[window])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_all_children(self):
    """ Returns all children recursively (depth-first, parents before
    their descendants) """
    direct = self.get_children_list()
    collected = list(direct)
    for child in direct:
        collected += child.get_all_children()
    return collected
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def containers(self) -> list:
    """ Returns all containers on a deck as a list.

    Containers sitting on stackable containers (at any depth) are
    included as well.
    """
    found: List = []
    for slot in self:
        found.extend(slot.get_children_list())
    # Walk the list by index while it grows, so children of stackable
    # containers (and their stackable children, recursively) are visited
    idx = 0
    while idx < len(found):
        item = found[idx]
        if getattr(item, 'stackable', False):
            found.extend(item.get_children_list())
        idx += 1
    return found
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calculate_grid(self):
    """ Calculates and stores the grid structure.

    Both the grid and its transpose are computed lazily: each is only
    built if its cached value is still None.
    """
    if self.grid is None:
        self.grid = self.get_wellseries(self.get_grid())
    if self.grid_transposed is None:
        flipped = self.transpose(self.get_grid())
        self.grid_transposed = self.get_wellseries(flipped)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transpose(self, rows):
    """ Transposes the grid to allow for cols.

    Takes a mapping of row -> (col -> cell) and returns the equivalent
    mapping of col -> (row -> cell), preserving encounter order.
    """
    flipped = OrderedDict()
    for row_name, columns in rows.items():
        for col_name, cell in columns.items():
            flipped.setdefault(col_name, OrderedDict())[row_name] = cell
    return flipped
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_wellseries(self, matrix):
    """ Returns the grid as a WellSeries of WellSeries """
    series = OrderedDict()
    for col, cells in matrix.items():
        column = OrderedDict()
        for row, cell in cells.items():
            # Cell holds the name parts of the child well, e.g. ('A', '1')
            column[row] = self.children_by_name[''.join(cell)]
        series[col] = WellSeries(column, name=col)
    return WellSeries(series)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def wells(self, *args, **kwargs):
    """ Returns child Well or list of child Wells.

    Dispatches on the arguments given: no args returns all children; a
    single list or multiple names are looked up by name; `x`/`y` kwargs
    select by coordinate; otherwise `to`/`length`-style selection is used.
    A single-well result is unwrapped from its series.
    """
    if args and isinstance(args[0], list):
        args = args[0]
    if not args and not kwargs:
        result = WellSeries(self.get_children_list())
    elif len(args) > 1:
        result = WellSeries([self.well(n) for n in args])
    elif 'x' in kwargs or 'y' in kwargs:
        result = self._parse_wells_x_y(*args, **kwargs)
    else:
        result = self._parse_wells_to_and_length(*args, **kwargs)
    return result[0] if len(result) == 1 else result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_device_information(
        device_info_string: str) -> Mapping[str, str]:
    '''
    Parse the module's device information response.

    Example response from temp-deck: "serial:aa11 model:bb22 version:cc33"

    :raises ParseError: if the input is falsy or not a string, has fewer
        than three space-separated fields, or is missing any of the
        model/version/serial keys
    '''
    error_msg = 'Unexpected argument to parse_device_information: {}'.format(
        device_info_string)
    if not device_info_string or \
            not isinstance(device_info_string, str):
        raise ParseError(error_msg)
    parsed_values = device_info_string.strip().split(' ')
    if len(parsed_values) < 3:
        log.error(error_msg)
        raise ParseError(error_msg)
    # Only the first three fields matter; each is a key:value pair
    res = {
        parse_key_from_substring(s): parse_string_value_from_substring(s)
        for s in parsed_values[:3]
    }
    for key in ['model', 'version', 'serial']:
        if key not in res:
            raise ParseError(error_msg)
    return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def simulate(protocol_file,
             propagate_logs=False,
             log_level='warning') -> List[Mapping[str, Any]]:
    """ Simulate the protocol itself.

    This is a one-stop function to simulate a protocol, whether python or
    json, no matter the api version, from external (i.e. not bound up in
    other internal server infrastructure) sources. Pass in a file-like
    object as ``protocol_file``; this function either returns (if the
    simulation has no problems) or raises an exception.

    The return value is the run log, a list of dicts representing the
    commands executed by the robot. Each dict has the keys:

    - ``level``: The depth at which this command is nested - if this an
      aspirate inside a mix inside a transfer, for instance, it would be 3.
    - ``payload``: The command, its arguments, and how to format its text.
      To format a message from a payload do
      ``payload['text'].format(**payload)``.
    - ``logs``: Any log messages that occurred during execution of this
      command, as :py:class:`logging.LogRecord` objects.

    :param file-like protocol_file: The protocol file to simulate.
    :param bool propagate_logs: Whether logs from the Opentrons stack may
        propagate up to the root handler. Most logs are best associated
        with the protocol actions that cause them, so this defaults to
        False.
    :param log_level: The level of logs to capture in the runlog:
        'debug', 'info', 'warning', or 'error'
    :returns List[Dict[str, Dict[str, Any]]]: A run log for user output.
    """
    stack_logger = logging.getLogger('opentrons')
    # When not propagating, protocol-time records are only captured by the
    # scraper attached below instead of reaching the root handler
    stack_logger.propagate = propagate_logs
    contents = protocol_file.read()
    if opentrons.config.feature_flags.use_protocol_api_v2():
        # APIv2 path: accept either a JSON protocol or python source,
        # decided by whether the contents parse as JSON
        try:
            execute_args = {'protocol_json': json.loads(contents)}
        except json.JSONDecodeError:
            execute_args = {'protocol_code': contents}
        context = opentrons.protocol_api.contexts.ProtocolContext()
        context.home()
        scraper = CommandScraper(stack_logger, log_level, context.broker)
        execute_args.update({'simulate': True,
                             'context': context})
        opentrons.protocol_api.execute.run_protocol(**execute_args)
    else:
        # APIv1 path: drive the robot singleton's broker instead
        try:
            proto = json.loads(contents)
        except json.JSONDecodeError:
            proto = contents
        opentrons.robot.disconnect()
        scraper = CommandScraper(stack_logger, log_level,
                                 opentrons.robot.broker)
        if isinstance(proto, dict):
            opentrons.protocols.execute_protocol(proto)
        else:
            # Python source in APIv1 is executed directly
            exec(proto, {})
    return scraper.commands
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
    """ Run the simulation from the command line.

    Parses arguments, runs :py:meth:`simulate` on the given protocol file,
    and optionally prints the formatted run log.
    """
    arg_parser = argparse.ArgumentParser(prog='opentrons_simulate',
                                         description=__doc__)
    arg_parser.add_argument(
        'protocol', metavar='PROTOCOL_FILE',
        type=argparse.FileType('r'),
        help='The protocol file to simulate (specify - to read from stdin).')
    arg_parser.add_argument(
        '-v', '--version', action='version',
        version=f'%(prog)s {opentrons.__version__}',
        help='Print the opentrons package version and exit')
    arg_parser.add_argument(
        '-o', '--output', action='store',
        help='What to output during simulations',
        choices=['runlog', 'nothing'],
        default='runlog')
    arg_parser.add_argument(
        '-l', '--log-level', action='store',
        help=('Log level for the opentrons stack. Anything below warning '
              'can be chatty'),
        choices=['error', 'warning', 'info', 'debug'],
        default='warning')
    parsed = arg_parser.parse_args()
    run_log = simulate(parsed.protocol, log_level=parsed.log_level)
    if parsed.output == 'runlog':
        print(format_runlog(run_log))
    return 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _command_callback(self, message):
""" The callback subscribed to the broker """ |
payload = message['payload']
if message['$'] == 'before':
self._commands.append({'level': self._depth,
'payload': payload,
'logs': []})
self._depth += 1
else:
while not self._queue.empty():
self._commands[-1]['logs'].append(self._queue.get())
self._depth = max(self._depth-1, 0) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def _update_firmware(filename, loop):
    """ Flash new Smoothie firmware from ``filename``.

    Currently uses the robot singleton from the API server to connect to
    Smoothie. Those calls should be separated out from the singleton so it
    can be used directly without requiring a full initialization of the API
    robot.

    :param filename: path to the firmware image to flash
    :param loop: the asyncio event loop on which to run the flasher
    :return: a tuple of (the flasher's output or an error message, the
        process return code — 1 if opentrons could not be imported)
    """
    try:
        from opentrons import robot
    except ModuleNotFoundError:
        # Without the API package there is no way to reach the board
        res = "Unable to find module `opentrons`--not updating firmware"
        rc = 1
        log.error(res)
    else:
        # ensure there is a reference to the port
        if not robot.is_connected():
            robot.connect()
        # get port name
        port = str(robot._driver.port)
        # set smoothieware into programming mode
        robot._driver._smoothie_programming_mode()
        # close the port so other application can access it
        robot._driver._connection.close()
        # run lpc21isp, THIS WILL TAKE AROUND 1 MINUTE TO COMPLETE
        update_cmd = 'lpc21isp -wipe -donotstart {0} {1} {2} 12000'.format(
            filename, port, robot.config.serial_speed)
        proc = await asyncio.create_subprocess_shell(
            update_cmd,
            stdout=asyncio.subprocess.PIPE,
            loop=loop)
        rd = await proc.stdout.read()
        res = rd.decode().strip()
        await proc.communicate()
        rc = proc.returncode
        if rc == 0:
            # flash succeeded: restore normal operation
            # re-open the port
            robot._driver._connection.open()
            # reset smoothieware
            robot._driver._smoothie_reset()
            # run setup gcodes
            robot._driver._setup()
    return res, rc
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
async def execute_module_command(request):
    """ Execute a command on a given module by its serial number """
    hw = hw_from_req(request)
    requested_serial = request.match_info['serial']
    body = await request.json()
    command_type = body.get('command_type')
    args = body.get('args')

    # Module discovery is async under protocol API v2, synchronous otherwise.
    if ff.use_protocol_api_v2():
        hw_mods = await hw.discover_modules()
    else:
        hw_mods = hw.attached_modules.values()

    if len(hw_mods) == 0:
        return web.json_response({"message": "No connected modules"},
                                 status=404)

    matching_mod = None
    for mod in hw_mods:
        if mod.device_info.get('serial') == requested_serial:
            matching_mod = mod
            break

    if matching_mod is None:
        return web.json_response({"message": "Specified module not found"},
                                 status=404)

    if not hasattr(matching_mod, command_type):
        return web.json_response(
            {'message': f'Module does not have command: {command_type}'},
            status=400)

    method = getattr(matching_mod, command_type)
    call_args = args or []
    # Module commands may be either coroutines or plain methods.
    if asyncio.iscoroutinefunction(method):
        val = await method(*call_args)
    else:
        val = method(*call_args)
    return web.json_response(
        {'message': 'Success', 'returnValue': val},
        status=200)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.