text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def transaction(self, func, *watches, **kwargs): """ Convenience method for executing the callable `func` as a transaction while watching all keys specified in `watches`. The 'func' callable should expect a single argument which is a Pipeline object. """
async def transaction(self, func, *watches, **kwargs):
    """
    Convenience method for executing the callable `func` as a transaction
    while watching all keys specified in `watches`.

    `func` is awaited with a single argument, a Pipeline object, inside a
    WATCH/retry loop that repeats until EXEC succeeds.

    Keyword arguments:
        shard_hint: forwarded to ``pipeline()``.
        value_from_callable: when True, return ``func``'s result instead
            of the EXEC result list.
        watch_delay: optional number of seconds to sleep before retrying
            after a WatchError.
    """
    shard_hint = kwargs.pop('shard_hint', None)
    value_from_callable = kwargs.pop('value_from_callable', False)
    watch_delay = kwargs.pop('watch_delay', None)
    async with await self.pipeline(True, shard_hint) as pipe:
        while True:
            try:
                if watches:
                    await pipe.watch(*watches)
                func_value = await func(pipe)
                exec_value = await pipe.execute()
                return func_value if value_from_callable else exec_value
            except WatchError:
                # A watched key changed under us; optionally back off,
                # then retry the whole transaction.
                if watch_delay is not None and watch_delay > 0:
                    # BUGFIX: asyncio.sleep() no longer accepts a ``loop``
                    # argument (deprecated since 3.8, removed in Python
                    # 3.10); the coroutine runs on the current loop anyway.
                    await asyncio.sleep(watch_delay)
                continue
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def initialize(self): """ Init the slots cache by asking all startup nodes what the current cluster configuration is TODO: Currently the last node will have the last say about how the configuration is setup. Maybe it should stop to try after it have correctly covered all slots or when one node is reached and it could execute CLUSTER SLOTS command. """
async def initialize(self):
    """
    Init the slots cache by asking all startup nodes what the current
    cluster configuration is.

    TODO: Currently the last node will have the last say about how the
    configuration is setup. Maybe it should stop to try after it have
    correctly covered all slots or when one node is reached and it could
    execute CLUSTER SLOTS command.
    """
    # NOTE(review): reconstructed from a whitespace-mangled source; the
    # loop nesting below follows the visible statement order — confirm
    # against the upstream aredis NodeManager.initialize.
    nodes_cache = {}
    tmp_slots = {}
    all_slots_covered = False
    disagreements = []
    startup_nodes_reachable = False
    nodes = self.orig_startup_nodes
    # With this option the client will attempt to connect to any of the
    # previous set of nodes instead of the original set of nodes
    if self.nodemanager_follow_cluster:
        nodes = self.startup_nodes
    for node in nodes:
        try:
            r = self.get_redis_link(host=node['host'], port=node['port'])
            cluster_slots = await r.cluster_slots()
            startup_nodes_reachable = True
        except ConnectionError:
            # Unreachable startup node: try the next one.
            continue
        except Exception:
            raise RedisClusterException('ERROR sending "cluster slots" command to redis server: {0}'.format(node))
        all_slots_covered = True
        # If there's only one server in the cluster, its ``host`` is ''
        # Fix it to the host in startup_nodes
        if len(cluster_slots) == 1 and len(self.startup_nodes) == 1:
            single_node_slots = cluster_slots.get((0, self.RedisClusterHashSlots - 1))[0]
            if len(single_node_slots['host']) == 0:
                single_node_slots['host'] = self.startup_nodes[0]['host']
                single_node_slots['server_type'] = 'master'
        # No need to decode response because StrictRedis should handle that for us...
        for min_slot, max_slot in cluster_slots:
            nodes = cluster_slots.get((min_slot, max_slot))
            master_node, slave_nodes = nodes[0], nodes[1:]
            if master_node['host'] == '':
                master_node['host'] = node['host']
            self.set_node_name(master_node)
            nodes_cache[master_node['name']] = master_node
            for i in range(min_slot, max_slot + 1):
                if i not in tmp_slots:
                    tmp_slots[i] = [master_node]
                    for slave_node in slave_nodes:
                        self.set_node_name(slave_node)
                        nodes_cache[slave_node['name']] = slave_node
                        tmp_slots[i].append(slave_node)
                else:
                    # Validate that 2 nodes want to use the same slot cache setup
                    if tmp_slots[i][0]['name'] != node['name']:
                        disagreements.append('{0} vs {1} on slot: {2}'.format(
                            tmp_slots[i][0]['name'], node['name'], i),
                        )
                        if len(disagreements) > 5:
                            raise RedisClusterException('startup_nodes could not agree on a valid slots cache. {0}'.format(', '.join(disagreements)))
        self.populate_startup_nodes()
        self.refresh_table_asap = False
        if self._skip_full_coverage_check:
            need_full_slots_coverage = False
        else:
            need_full_slots_coverage = await self.cluster_require_full_coverage(nodes_cache)
        # Validate if all slots are covered or if we should try next startup node
        for i in range(0, self.RedisClusterHashSlots):
            if i not in tmp_slots and need_full_slots_coverage:
                all_slots_covered = False
        if all_slots_covered:
            # All slots are covered and application can continue to execute
            break
    if not startup_nodes_reachable:
        raise RedisClusterException('Redis Cluster cannot be connected. '
                                    'Please provide at least one reachable node.')
    if not all_slots_covered:
        raise RedisClusterException('Not all slots are covered after query all startup_nodes. '
                                    '{0} of {1} covered...'.format(len(tmp_slots), self.RedisClusterHashSlots))
    # Set the tmp variables to the real variables
    self.slots = tmp_slots
    self.nodes = nodes_cache
    self.reinitialize_counter = 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def cluster_require_full_coverage(self, nodes_cache): """ if exists 'cluster-require-full-coverage no' config on redis servers, then even all slots are not covered, cluster still will be able to respond """
async def cluster_require_full_coverage(self, nodes_cache):
    """
    Return True as soon as any node reports
    ``cluster-require-full-coverage yes``; when every node has it set to
    'no' the cluster can still respond even with uncovered slots.
    """
    candidates = nodes_cache or self.nodes
    # at least one node should have cluster-require-full-coverage yes
    for candidate in candidates.values():
        link = self.get_redis_link(host=candidate['host'], port=candidate['port'])
        config = await link.config_get('cluster-require-full-coverage')
        if 'yes' in config.values():
            return True
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_node(self, host, port, server_type=None): """ Update data for a node. """
def set_node(self, host, port, server_type=None):
    """Create or refresh the cached entry for ``host:port`` and return it."""
    name = '{0}:{1}'.format(host, port)
    entry = {
        'host': host,
        'port': port,
        'name': name,
        'server_type': server_type,
    }
    self.nodes[name] = entry
    return entry
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def populate_startup_nodes(self): """ Do something with all startup nodes and filters out any duplicates """
def populate_startup_nodes(self):
    """Merge every known node into ``startup_nodes`` and drop duplicates."""
    for entry in self.startup_nodes:
        self.set_node_name(entry)
    for known in self.nodes.values():
        if known not in self.startup_nodes:
            self.startup_nodes.append(known)
    # Dicts are unhashable, so freeze each one into a frozenset of its
    # items to dedupe, then thaw the survivors back into dicts.
    unique = {frozenset(entry.items()) for entry in self.startup_nodes}
    self.startup_nodes = [dict(entry) for entry in unique]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset(self): """ Resets the connection pool back to a clean state. """
def reset(self):
    """
    Resets the connection pool back to a clean state: forgets all created,
    idle and in-use connections and re-records the owning process id.
    """
    # Remember which process owns this pool so forks can detect staleness.
    self.pid = os.getpid()
    self._created_connections = 0
    self._created_connections_per_node = {}  # Dict(Node, Int)
    self._available_connections = {}  # Dict(Node, List)
    self._in_use_connections = {}  # Dict(Node, Set)
    self._check_lock = threading.Lock()
    self.initialized = False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def disconnect(self): """ Nothing that requires any overwrite. """
def disconnect(self):
    """Close every pooled connection, idle and checked-out alike."""
    for pool in (self._available_connections, self._in_use_connections):
        for node_connections in pool.values():
            for connection in node_connections:
                connection.disconnect()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_random_connection(self): """ Open new connection to random redis server. """
def get_random_connection(self):
    """Open new connection to random redis server."""
    pools = self._available_connections
    if pools:
        picked = random.choice(list(pools))
        idle = pools[picked]
        # check it in case of empty connection list
        if idle:
            return idle.pop()
    # No idle connection anywhere: fall back to dialing startup nodes.
    for candidate in self.nodes.random_startup_node_iter():
        conn = self.get_connection_by_node(candidate)
        if conn:
            return conn
    raise Exception("Cant reach a single startup node.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_connection_by_slot(self, slot): """ Determine what server a specific slot belongs to and return a redis object that is connected """
def get_connection_by_slot(self, slot):
    """
    Determine what server a specific slot belongs to and return a redis
    object that is connected to it; fall back to a random node when the
    slot is not in the table.
    """
    self._checkpid()
    try:
        target = self.get_node_by_slot(slot)
        return self.get_connection_by_node(target)
    except KeyError:
        # Slot table doesn't know this slot yet - pick any reachable node.
        return self.get_random_connection()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_connection_by_node(self, node): """ get a connection by node """
def get_connection_by_node(self, node):
    """Check out a connection for ``node``, creating one if no idle one exists."""
    self._checkpid()
    self.nodes.set_node_name(node)
    name = node["name"]
    try:
        # Try to get connection from existing pool
        connection = self._available_connections.get(name, []).pop()
    except IndexError:
        connection = self.make_connection(node)
    self._in_use_connections.setdefault(name, set()).add(connection)
    return connection
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def encode(self, value): """ Encode the value so that it's identical to what we'll read off the connection """
def encode(self, value):
    """
    Encode the value so that it's identical to what we'll read off the
    connection: str when ``decode_responses`` is set, bytes otherwise.
    Values of other types pass through untouched.
    """
    if self.decode_responses:
        if isinstance(value, bytes):
            value = value.decode(self.encoding)
    elif isinstance(value, str):
        value = value.encode(self.encoding)
    return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def punsubscribe(self, *args): """ Unsubscribe from the supplied patterns. If empty, unsubscribe from all patterns. """
async def punsubscribe(self, *args):
    """
    Unsubscribe from the supplied patterns. If empty, unsubscribe
    from all patterns.
    """
    patterns = list_or_args(args[0], args[1:]) if args else args
    return await self.execute_command('PUNSUBSCRIBE', *patterns)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def listen(self):
    """Listen for messages on channels this client has been subscribed to."""
    # Only perform a blocking read when at least one subscription is
    # active; otherwise fall through and implicitly return None.
    if self.subscribed:
        return self.handle_message(await self.parse_response(block=True))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def get_message(self, ignore_subscribe_messages=False, timeout=0): """ Get the next message if one is available, otherwise None. If timeout is specified, the system will wait for `timeout` seconds before returning. Timeout should be specified as a floating point number. """
async def get_message(self, ignore_subscribe_messages=False, timeout=0):
    """
    Get the next message if one is available, otherwise None.

    If timeout is specified, the system will wait for `timeout` seconds
    before returning. Timeout should be specified as a floating point
    number.
    """
    raw = await self.parse_response(block=False, timeout=timeout)
    if not raw:
        return None
    return self.handle_message(raw, ignore_subscribe_messages)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _gen_identity(self, key, param=None): """generate identity according to key and param given"""
def _gen_identity(self, key, param=None):
    """Generate a cache identity from ``key`` and an optional ``param``."""
    if not (self.identity_generator and param is not None):
        # No generator configured, or nothing to mix in: key is the identity.
        return key
    if self.serializer:
        param = self.serializer.serialize(param)
    if self.compressor:
        param = self.compressor.compress(param)
    return self.identity_generator.generate(key, param)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _pack(self, content): """pack the content using serializer and compressor"""
def _pack(self, content):
    """Serialize then compress ``content`` for storage; both steps optional."""
    serializer = self.serializer
    if serializer:
        content = serializer.serialize(content)
    compressor = self.compressor
    if compressor:
        content = compressor.compress(content)
    return content
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _unpack(self, content): """unpack cache using serializer and compressor"""
def _unpack(self, content):
    """Reverse of ``_pack``: best-effort decompress, then deserialize."""
    compressor = self.compressor
    if compressor:
        try:
            content = compressor.decompress(content)
        except CompressError:
            # Entry may have been stored uncompressed; keep the raw bytes.
            pass
    serializer = self.serializer
    if serializer:
        content = serializer.deserialize(content)
    return content
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def delete(self, key, param=None): """ delete cache corresponding to identity generated from key and param """
async def delete(self, key, param=None):
    """
    Delete cache corresponding to identity generated from key and param.
    """
    return await self.client.delete(self._gen_identity(key, param))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def delete_pattern(self, pattern, count=None): """ delete cache according to pattern in redis, delete `count` keys each time """
async def delete_pattern(self, pattern, count=None):
    """
    Delete every key matching ``pattern``, scanning ``count`` keys per
    SCAN iteration. Returns the total number of keys deleted.
    """
    cursor = '0'
    count_deleted = 0
    # SCAN signals completion by returning cursor 0.
    while cursor != 0:
        cursor, identities = await self.client.scan(
            cursor=cursor, match=pattern, count=count
        )
        # BUGFIX: a SCAN iteration may legitimately return zero keys;
        # DEL requires at least one key argument, so only delete when
        # the page is non-empty.
        if identities:
            count_deleted += await self.client.delete(*identities)
    return count_deleted
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def exist(self, key, param=None): """see if specific identity exists"""
async def exist(self, key, param=None):
    """See if the identity generated from key/param exists in redis."""
    identity = self._gen_identity(key, param)
    return await self.client.exists(identity)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def ttl(self, key, param=None): """get time to live of a specific identity"""
async def ttl(self, key, param=None):
    """Get the remaining time-to-live of the identity built from key/param."""
    identity = self._gen_identity(key, param)
    return await self.client.ttl(identity)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def set(self, key, value, param=None, expire_time=None, herd_timeout=None): """ Use key and param to generate identity and pack the content, expire the key within real_timeout if expire_time is given. real_timeout is equal to the sum of expire_time and herd_time. The content is cached with expire_time. """
async def set(self, key, value, param=None, expire_time=None, herd_timeout=None):
    """
    Use key and param to generate identity and pack the content.

    The stored payload is ``[value, expected_expiry_ts]`` where the
    timestamp includes both ``expire_time`` and the herd timeout, while
    the redis key itself expires after ``expire_time`` only.
    """
    identity = self._gen_identity(key, param)
    deadline = int(time.time())
    if expire_time:
        deadline += expire_time
    deadline += herd_timeout or self.default_herd_timeout
    packed = self._pack([value, deadline])
    return await self.client.set(identity, packed, ex=expire_time)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def xrange(self, name: str, start='-', end='+', count=None) -> list: """ Read stream values within an interval. Available since 5.0.0. Time complexity: O(log(N)+M) with N being the number of elements in the stream and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(log(N)). :param name: name of the stream. :param start: first stream ID. defaults to '-', meaning the earliest available. :param end: last stream ID. defaults to '+', meaning the latest available. :param count: if set, only return this many items, beginning with the earliest available. :return list of (stream_id, entry(k-v pair)) """
async def xrange(self, name: str, start='-', end='+', count=None) -> list:
    """
    Read stream values within an interval.

    :param name: name of the stream.
    :param start: first stream ID, '-' = earliest available.
    :param end: last stream ID, '+' = latest available.
    :param count: if set, return at most this many entries.
    :return: list of (stream_id, entry(k-v pair))
    """
    pieces = [start, end]
    if count is not None:
        # COUNT must be a strictly positive integer.
        if not isinstance(count, int) or count < 1:
            raise RedisError("XRANGE count must be a positive integer")
        pieces.extend(("COUNT", str(count)))
    return await self.execute_command('XRANGE', name, *pieces)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def ltrim(self, name, start, end): """ Trim the list ``name``, removing all values not within the slice between ``start`` and ``end`` ``start`` and ``end`` can be negative numbers just like Python slicing notation """
async def ltrim(self, name, start, end):
    """
    Trim the list ``name``, removing all values not within the slice
    between ``start`` and ``end``. Both indices may be negative, using
    Python-style slicing notation.
    """
    return await self.execute_command('LTRIM', name, start, end)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def block_pipeline_command(func): """ Prints error because some pipelined commands should be blocked when running in cluster-mode """
def block_pipeline_command(func):
    """
    Wrap ``func`` so that any call raises RedisClusterException - used to
    block commands that are unsupported on a cluster pipeline.
    """
    def inner(*args, **kwargs):
        message = (
            "ERROR: Calling pipelined function {0} is blocked "
            "when running redis in cluster mode...".format(func.__name__)
        )
        raise RedisClusterException(message)
    return inner
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def immediate_execute_command(self, *args, **options): """ Execute a command immediately, but don't auto-retry on a ConnectionError if we're already WATCHing a variable. Used when issuing WATCH or subsequent commands retrieving their values but before MULTI is called. """
async def immediate_execute_command(self, *args, **options):
    """
    Execute a command immediately, but don't auto-retry on a
    ConnectionError if we're already WATCHing a variable. Used when
    issuing WATCH or subsequent commands retrieving their values but
    before MULTI is called.
    """
    command_name = args[0]
    conn = self.connection
    # if this is the first call, we need a connection
    if not conn:
        conn = self.connection_pool.get_connection()
        self.connection = conn
    try:
        await conn.send_command(*args)
        return await self.parse_response(conn, command_name, **options)
    except (ConnectionError, TimeoutError) as e:
        # Drop the dead socket before deciding whether to retry.
        conn.disconnect()
        if not conn.retry_on_timeout and isinstance(e, TimeoutError):
            raise
        # if we're not already watching, we can safely retry the command
        try:
            if not self.watching:
                await conn.send_command(*args)
                return await self.parse_response(conn, command_name, **options)
        except ConnectionError:
            # the retry failed so cleanup.
            conn.disconnect()
            await self.reset()
            raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _determine_slot(self, *args): """ figure out what slot based on command and args """
def _determine_slot(self, *args):
    """Figure out which hash slot the command in ``args`` targets."""
    if len(args) <= 1:
        raise RedisClusterException("No way to dispatch this command to Redis Cluster. Missing key.")
    command = args[0]
    keyslot = self.connection_pool.nodes.keyslot
    if command in ['EVAL', 'EVALSHA']:
        # Scripts carry an explicit key count; every key must hash to the
        # same slot or the script cannot be routed.
        numkeys = args[2]
        slots = {keyslot(k) for k in args[3: 3 + numkeys]}
        if len(slots) != 1:
            raise RedisClusterException("{0} - all keys must map to the same key slot".format(command))
        return slots.pop()
    return keyslot(args[1])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset(self): """ Reset back to empty pipeline. """
def reset(self):
    """Reset back to empty pipeline."""
    self.command_stack = []  # queued commands awaiting execute()
    self.scripts = set()  # Script objects registered on this pipeline
    self.watches = []  # keys currently under WATCH
    # clean up the other instance attributes
    self.watching = False
    self.explicit_transaction = False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=True): """ Send a bunch of cluster commands to the redis cluster. `allow_redirections` If the pipeline should follow `ASK` & `MOVED` responses automatically. If set to false it will raise RedisClusterException. """
async def send_cluster_commands(self, stack, raise_on_error=True, allow_redirections=True):
    """
    Send a bunch of cluster commands to the redis cluster.

    `allow_redirections` If the pipeline should follow `ASK` & `MOVED`
    responses automatically. If set to false it will raise
    RedisClusterException.
    """
    # NOTE(review): reconstructed from a whitespace-mangled source;
    # statement order preserved, comments condensed.
    # The first pass sends every queued command; later passes only retry
    # the ones that failed.
    attempt = sorted(stack, key=lambda x: x.position)
    # Group commands per target node (keyed by "host:port" name).
    nodes = {}
    for c in attempt:
        # Route each command via the slot table.
        slot = self._determine_slot(*c.args)
        node = self.connection_pool.get_node_by_slot(slot)
        # little hack to make sure the node name is populated.
        self.connection_pool.nodes.set_node_name(node)
        node_name = node['name']
        if node_name not in nodes:
            nodes[node_name] = NodeCommands(self.parse_response, self.connection_pool.get_connection_by_node(node))
        nodes[node_name].append(c)
    # Write to every node first, then read, so requests are effectively
    # flushed across the network in parallel.
    node_commands = nodes.values()
    for n in node_commands:
        await n.write()
    for n in node_commands:
        await n.read()
    # Release the connections back into the pool. write()/read() already
    # guard against connection/socket errors; if something worse happened,
    # returning a connection with unread buffered data would poison every
    # later request on it, so this is deliberately outside a try/finally.
    for n in nodes.values():
        self.connection_pool.release(n.connection)
    # Collect the commands we are allowed to retry
    # (MOVED, ASK, or connection/timeout errors).
    attempt = sorted([c for c in attempt if isinstance(c.result, ERRORS_ALLOW_RETRY)], key=lambda x: x.position)
    if attempt and allow_redirections:
        # Retry the stragglers one at a time through the main client so
        # the redirection/retry logic lives in one place; pipelining gains
        # are already lost at this point. Heavy failure also schedules a
        # slots-table rebuild, so MOVED errors self-correct quickly.
        await self.connection_pool.nodes.increment_reinitialize_counter(len(attempt))
        for c in attempt:
            try:
                # send each command individually like we do in the main client.
                c.result = await super(StrictClusterPipeline, self).execute_command(*c.args, **c.options)
            except RedisError as e:
                c.result = e
    # Flatten results back into the order commands were issued in.
    response = [c.result for c in sorted(stack, key=lambda x: x.position)]
    if raise_on_error:
        self.raise_first_error(stack)
    return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def _watch(self, node, conn, names):
    """Watches the values at keys ``names``."""
    # NOTE(review): reconstructed from a whitespace-mangled source — the
    # explicit-transaction check is placed after the per-key loop; confirm
    # against upstream.
    for name in names:
        slot = self._determine_slot('WATCH', name)
        dist_node = self.connection_pool.get_node_by_slot(slot)
        if node.get('name') != dist_node['name']:
            # raise error if commands in a transaction can not hash to same node
            if len(node) > 0:
                raise ClusterTransactionError("Keys in request don't hash to the same node")
    if self.explicit_transaction:
        raise RedisError('Cannot issue a WATCH after a MULTI')
    await conn.send_command('WATCH', *names)
    return await conn.read_response()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def _unwatch(self, conn):
    """Unwatches all previously specified keys."""
    await conn.send_command('UNWATCH')
    res = await conn.read_response()
    # BUGFIX: the original ``self.watching and res or True`` is the broken
    # ``cond and a or b`` idiom - a falsy ``res`` while watching would
    # silently collapse to True. Use an explicit conditional instead.
    return res if self.watching else True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def write(self): """ Code borrowed from StrictRedis so it can be fixed """
async def write(self):
    """Code borrowed from StrictRedis so it can be fixed."""
    connection = self.connection
    commands = self.commands
    # We are going to clobber the commands with the write, so go ahead
    # and ensure that nothing is sitting there from a previous run.
    for c in commands:
        c.result = None
    # build up all commands into a single request to increase network perf
    # send all the commands and catch connection and timeout errors.
    try:
        await connection.send_packed_command(connection.pack_commands([c.args for c in commands]))
    except (ConnectionError, TimeoutError) as e:
        # Record the failure on every command so read() can report it.
        for c in commands:
            c.result = e
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set(self, type, offset, value): """ Set the specified bit field and returns its old value. """
def set(self, type, offset, value):
    """
    Set the specified bit field and returns its old value.
    Fluent API: queues the subcommand and returns ``self``.
    """
    self._command_stack += ['SET', type, offset, value]
    return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, type, offset): """ Returns the specified bit field. """
def get(self, type, offset):
    """
    Returns the specified bit field.
    Fluent API: queues the subcommand and returns ``self``.
    """
    self._command_stack += ['GET', type, offset]
    return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def zrange(self, name, start, end, desc=False, withscores=False, score_cast_func=float): """ Return a range of values from sorted set ``name`` between ``start`` and ``end`` sorted in ascending order. ``start`` and ``end`` can be negative, indicating the end of the range. ``desc`` a boolean indicating whether to sort the results descendingly ``withscores`` indicates to return the scores along with the values. The return type is a list of (value, score) pairs ``score_cast_func`` a callable used to cast the score return value """
async def zrange(self, name, start, end, desc=False, withscores=False,
                 score_cast_func=float):
    """
    Return a range of values from sorted set ``name`` between ``start``
    and ``end`` sorted in ascending order (indices may be negative).
    ``desc`` sorts descendingly; ``withscores`` returns (value, score)
    pairs cast through ``score_cast_func``.
    """
    if desc:
        # Descending order is handled by the dedicated ZREVRANGE path.
        return await self.zrevrange(name, start, end, withscores, score_cast_func)
    pieces = ['ZRANGE', name, start, end]
    if withscores:
        pieces.append(b('WITHSCORES'))
    options = {
        'withscores': withscores,
        'score_cast_func': score_cast_func
    }
    return await self.execute_command(*pieces, **options)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def zremrangebyscore(self, name, min, max): """ Remove all elements in the sorted set ``name`` with scores between ``min`` and ``max``. Returns the number of elements removed. """
async def zremrangebyscore(self, name, min, max):
    """
    Remove all elements in the sorted set ``name`` with scores between
    ``min`` and ``max``. Returns the number of elements removed.
    """
    return await self.execute_command('ZREMRANGEBYSCORE', name, min, max)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def expire(self, name, time): """ Set an expire flag on key ``name`` for ``time`` seconds. ``time`` can be represented by an integer or a Python timedelta object. """
async def expire(self, name, time):
    """
    Set an expire flag on key ``name`` for ``time`` seconds. ``time``
    can be represented by an integer or a Python timedelta object.
    """
    if isinstance(time, datetime.timedelta):
        # Convert to whole seconds (sub-second precision is dropped).
        time = time.days * 24 * 3600 + time.seconds
    return await self.execute_command('EXPIRE', name, time)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def delete(self, *names): """ "Delete one or more keys specified by ``names``" Cluster impl: Iterate all keys and send DELETE for each key. This will go a lot slower than a normal delete call in StrictRedis. Operation is no longer atomic. """
async def delete(self, *names):
    """
    Delete one or more keys specified by ``names``.

    Cluster impl: issue a separate DEL per key, since keys may live on
    different nodes. Slower than plain StrictRedis and no longer atomic.
    """
    deleted = 0
    for key in names:
        deleted += await self.execute_command('DEL', key)
    return deleted
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def geoadd(self, name, *values): """ Add the specified geospatial items to the specified key identified by the ``name`` argument. The Geospatial items are given as ordered members of the ``values`` argument, each item or place is formed by the triad longitude, latitude and name. """
async def geoadd(self, name, *values):
    """
    Add the specified geospatial items to the key ``name``. ``values``
    is a flat sequence of (longitude, latitude, member) triples.
    """
    # Items must come in complete triples.
    if len(values) % 3:
        raise RedisError("GEOADD requires places with lon, lat and name values")
    return await self.execute_command('GEOADD', name, *values)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def georadius(self, name, longitude, latitude, radius, unit=None, withdist=False, withcoord=False, withhash=False, count=None, sort=None, store=None, store_dist=None): """ Return the members of the specified key identified by the ``name`` argument which are within the borders of the area specified with the ``latitude`` and ``longitude`` location and the maximum distance from the center specified by the ``radius`` value. The units must be one of the following : m, km mi, ft. By default ``withdist`` indicates to return the distances of each place. ``withcoord`` indicates to return the latitude and longitude of each place. ``withhash`` indicates to return the geohash string of each place. ``count`` indicates to return the number of elements up to N. ``sort`` indicates to return the places in a sorted way, ASC for nearest to fairest and DESC for fairest to nearest. ``store`` indicates to save the places names in a sorted set named with a specific key, each element of the destination sorted set is populated with the score got from the original geo sorted set. ``store_dist`` indicates to save the places names in a sorted set named with a specific key, instead of ``store`` the sorted set destination score is set with the distance. """
async def georadius(self, name, longitude, latitude, radius, unit=None,
                    withdist=False, withcoord=False, withhash=False,
                    count=None, sort=None, store=None, store_dist=None):
    """
    Return the members of key ``name`` within ``radius`` of the point
    (``longitude``, ``latitude``). Thin wrapper delegating all options to
    the shared GEORADIUS implementation.
    """
    return await self._georadiusgeneric('GEORADIUS',
                                        name, longitude, latitude, radius,
                                        unit=unit, withdist=withdist,
                                        withcoord=withcoord, withhash=withhash,
                                        count=count, sort=sort, store=store,
                                        store_dist=store_dist)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def georadiusbymember(self, name, member, radius, unit=None, withdist=False, withcoord=False, withhash=False, count=None, sort=None, store=None, store_dist=None): """ This command is exactly like ``georadius`` with the sole difference that instead of taking, as the center of the area to query, a longitude and latitude value, it takes the name of a member already existing inside the geospatial index represented by the sorted set. """
async def georadiusbymember(self, name, member, radius, unit=None,
                            withdist=False, withcoord=False, withhash=False,
                            count=None, sort=None, store=None, store_dist=None):
    """
    Like ``georadius`` but centered on an existing ``member`` of the
    geospatial sorted set instead of an explicit longitude/latitude.
    """
    return await self._georadiusgeneric('GEORADIUSBYMEMBER',
                                        name, member, radius, unit=unit,
                                        withdist=withdist, withcoord=withcoord,
                                        withhash=withhash, count=count,
                                        sort=sort, store=store,
                                        store_dist=store_dist)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def master_for(self, service_name, redis_class=StrictRedis, connection_pool_class=SentinelConnectionPool, **kwargs): """ Returns a redis client instance for the ``service_name`` master. A SentinelConnectionPool class is used to retrive the master's address before establishing a new connection. NOTE: If the master's address has changed, any cached connections to the old master are closed. By default clients will be a redis.StrictRedis instance. Specify a different class to the ``redis_class`` argument if you desire something different. The ``connection_pool_class`` specifies the connection pool to use. The SentinelConnectionPool will be used by default. All other keyword arguments are merged with any connection_kwargs passed to this class and passed to the connection pool as keyword arguments to be used to initialize Redis connections. """
def master_for(self, service_name, redis_class=StrictRedis,
               connection_pool_class=SentinelConnectionPool, **kwargs):
    """
    Returns a redis client instance for the ``service_name`` master,
    backed by a SentinelConnectionPool that resolves the master address
    before each new connection.
    """
    # Mark the pool as master-seeking; extra kwargs are layered over the
    # Sentinel-level connection kwargs.
    kwargs['is_master'] = True
    connection_kwargs = dict(self.connection_kwargs)
    connection_kwargs.update(kwargs)
    return redis_class(connection_pool=connection_pool_class(
        service_name, self, **connection_kwargs))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def extend(self, additional_time): """ Adds more time to an already acquired lock. ``additional_time`` can be specified as an integer or a float, both representing the number of seconds to add. """
async def extend(self, additional_time):
    """
    Adds more time to an already acquired lock. ``additional_time`` can
    be an int or a float number of seconds.

    Raises LockError when the lock is not held or has no timeout.
    """
    if self.local.token is None:
        raise LockError("Cannot extend an unlocked lock")
    if self.timeout is None:
        raise LockError("Cannot extend a lock with no timeout")
    return await self.do_extend(additional_time)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def cluster_delslots(self, *slots): """ Set hash slots as unbound in the cluster. It determines by it self what node the slot is in and sends it there Returns a list of the results for each processed slot. """
async def cluster_delslots(self, *slots):
    """
    Set hash slots as unbound in the cluster, routing each DELSLOTS to
    the node currently owning that slot. Returns one result per slot.
    """
    slot_owners = self._nodes_slots_to_slots_nodes(await self.cluster_nodes())
    return [
        await self.execute_command('CLUSTER DELSLOTS', slot, node_id=slot_owners[slot])
        for slot in slots
    ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def cluster_failover(self, node_id, option): """ Forces a slave to perform a manual failover of its master Sends to specified node """
async def cluster_failover(self, node_id, option):
    """
    Forces a slave to perform a manual failover of its master.
    ``option`` must be 'FORCE' or 'TAKEOVER'. Sent to the specified node.
    """
    valid = isinstance(option, str) and option.upper() in {'FORCE', 'TAKEOVER'}
    if not valid:
        raise ClusterError('Wrong option provided')
    return await self.execute_command('CLUSTER FAILOVER', option, node_id=node_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def cluster_reset(self, node_id, soft=True): """ Reset a Redis Cluster node If 'soft' is True then it will send 'SOFT' argument If 'soft' is False then it will send 'HARD' argument Sends to specified node """
async def cluster_reset(self, node_id, soft=True):
    """
    Reset a Redis Cluster node: SOFT when ``soft`` is True, HARD
    otherwise. Sent to the specified node.
    """
    return await self.execute_command(
        'CLUSTER RESET', 'SOFT' if soft else 'HARD', node_id=node_id
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def cluster_reset_all_nodes(self, soft=True): """ Send CLUSTER RESET to all nodes in the cluster If 'soft' is True then it will send 'SOFT' argument If 'soft' is False then it will send 'HARD' argument Sends to all nodes in the cluster """
async def cluster_reset_all_nodes(self, soft=True):
    """
    Send CLUSTER RESET (SOFT when ``soft`` is True, HARD otherwise) to
    every node in the cluster and collect the per-node results.
    """
    option = 'SOFT' if soft else 'HARD'
    results = []
    for node in await self.cluster_nodes():
        results.append(
            await self.execute_command('CLUSTER RESET', option, node_id=node['id'])
        )
    return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def cluster_setslot(self, node_id, slot_id, state): """ Bind an hash slot to a specific node Sends to specified node """
async def cluster_setslot(self, node_id, slot_id, state):
    """
    Bind a hash slot to a specific node; sent to the specified node.
    ``state`` is IMPORTING/MIGRATING/NODE (requires node_id) or STABLE.
    """
    normalized = state.upper()
    if normalized in {'IMPORTING', 'MIGRATING', 'NODE'} and node_id is not None:
        return await self.execute_command('CLUSTER SETSLOT', slot_id, state, node_id)
    if normalized == 'STABLE':
        return await self.execute_command('CLUSTER SETSLOT', slot_id, 'STABLE')
    raise RedisError('Invalid slot state: {0}'.format(state))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def execute(self, keys=None, args=None, client=None):
    """
    Execute the script, passing any required ``keys`` and ``args``.

    ``keys`` and ``args`` default to ``None`` (treated as empty sequences)
    instead of mutable ``[]`` defaults, which would be shared across calls.
    If ``client`` is None, the client the script was registered with is used.
    On NoScriptError the script is re-loaded via SCRIPT LOAD and retried once.
    """
    keys = keys if keys is not None else []
    args = args if args is not None else []
    if client is None:
        client = self.registered_client
    args = tuple(keys) + tuple(args)
    # make sure the Redis server knows about the script
    if isinstance(client, BasePipeline):
        # make sure this script is good to go on pipeline
        client.scripts.add(self)
    try:
        return await client.evalsha(self.sha, len(keys), *args)
    except NoScriptError:
        # Maybe the client is pointed to a different server than the client
        # that created this instance?
        # Overwrite the sha just in case there was a discrepancy.
        self.sha = await client.script_load(self.script)
        return await client.evalsha(self.sha, len(keys), *args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _random_id(self, size=16, chars=string.ascii_uppercase + string.digits): """ Generates a random id based on `size` and `chars` variable. By default it will generate a 16 character long string based on ascii uppercase letters and digits. """
return ''.join(random.choice(chars) for _ in range(size))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def sentinel_monitor(self, name, ip, port, quorum):
    """Add a new master to Sentinel to be monitored."""
    command_args = ('SENTINEL MONITOR', name, ip, port, quorum)
    return await self.execute_command(*command_args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def sentinel_set(self, name, option, value):
    """Set Sentinel monitoring parameters for a given master."""
    command_args = ('SENTINEL SET', name, option, value)
    return await self.execute_command(*command_args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def sdiff(self, keys, *args):
    """Return the difference of sets specified by ``keys``."""
    all_keys = list_or_args(keys, args)
    return await self.execute_command('SDIFF', *all_keys)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def spop(self, name, count=None): """ Remove and return a random member of set ``name`` ``count`` should be of type int and defaults to 1. If ``count`` is supplied, pops a list of ``count`` random members of set ``name`` """
if count and isinstance(count, int): return await self.execute_command('SPOP', name, count) else: return await self.execute_command('SPOP', name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def srandmember(self, name, number=None): """ If ``number`` is None, returns a random member of set ``name``. If ``number`` is supplied, returns a list of ``number`` random members of set ``name``. Note this is only available when running Redis 2.6+. """
args = number and [number] or [] return await self.execute_command('SRANDMEMBER', name, *args)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def sunion(self, keys, *args):
    """Return the union of sets specified by ``keys``."""
    key_list = list_or_args(keys, args)
    return await self.execute_command('SUNION', *key_list)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def sdiffstore(self, dest, keys, *args): """ Store the difference of sets specified by ``keys`` into a new set named ``dest``. Returns the number of keys in the new set. Overwrites dest key if it exists. Cluster impl: Use sdiff() --> Delete dest key --> store result in dest key """
res = await self.sdiff(keys, *args) await self.delete(dest) if not res: return 0 return await self.sadd(dest, *res)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def execute_command(self, *args, **kwargs): """ Send a command to a node in the cluster """
# Route a command to the right cluster node, following MOVED/ASK redirects
# and retrying transient failures up to RedisClusterRequestTTL times.
if not self.connection_pool.initialized:
    await self.connection_pool.initialize()
if not args:
    raise RedisClusterException("Unable to determine command to use")
command = args[0]
# Some commands are pinned to specific nodes by determine_node().
node = self.determine_node(*args, **kwargs)
if node:
    return await self.execute_command_on_nodes(node, *args, **kwargs)
# If set externally we must update the slots cache before calling any commands
if self.refresh_table_asap:
    await self.connection_pool.nodes.initialize()
    self.refresh_table_asap = False
redirect_addr = None
asking = False
try_random_node = False
slot = self._determine_slot(*args)
ttl = int(self.RedisClusterRequestTTL)
while ttl > 0:
    ttl -= 1
    if asking:
        # A previous ASK redirect: talk to the redirected node this time.
        node = self.connection_pool.nodes.nodes[redirect_addr]
        r = self.connection_pool.get_connection_by_node(node)
    elif try_random_node:
        # After a connection failure, retry against a random node.
        r = self.connection_pool.get_random_connection()
        try_random_node = False
    else:
        if self.refresh_table_asap:
            # MOVED
            node = self.connection_pool.get_master_node_by_slot(slot)
        else:
            node = self.connection_pool.get_node_by_slot(slot)
        r = self.connection_pool.get_connection_by_node(node)
    try:
        if asking:
            await r.send_command('ASKING')
            await self.parse_response(r, "ASKING", **kwargs)
            asking = False
        await r.send_command(*args)
        return await self.parse_response(r, command, **kwargs)
    except (RedisClusterException, BusyLoadingError):
        raise
    except (CancelledError, ConnectionError, TimeoutError):
        # Transport-level failure: fall back to a random node, backing off
        # briefly once half the retry budget is spent.
        try_random_node = True
        if ttl < self.RedisClusterRequestTTL / 2:
            await asyncio.sleep(0.1)
    except ClusterDownError as e:
        # Whole-cluster failure: drop all connections and force a refresh.
        self.connection_pool.disconnect()
        self.connection_pool.reset()
        self.refresh_table_asap = True
        raise e
    except MovedError as e:
        # Reinitialize on every x number of MovedError.
        # This counter will increase faster when the same client object
        # is shared between multiple threads. To reduce the frequency you
        # can set the variable 'reinitialize_steps' in the constructor.
        self.refresh_table_asap = True
        await self.connection_pool.nodes.increment_reinitialize_counter()
        node = self.connection_pool.nodes.set_node(e.host, e.port, server_type='master')
        self.connection_pool.nodes.slots[e.slot_id][0] = node
    except TryAgainError as e:
        # Slot is mid-migration; wait a little and retry the same slot.
        if ttl < self.RedisClusterRequestTTL / 2:
            await asyncio.sleep(0.05)
    except AskError as e:
        # One-shot redirect: next attempt must send ASKING to this address.
        redirect_addr, asking = "{0}:{1}".format(e.host, e.port), True
    finally:
        self.connection_pool.release(r)
raise ClusterError('TTL exhausted.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def on_connect(self, connection):
    """Called when the stream connects."""
    reader = connection._reader
    self._stream = reader
    self._buffer = SocketBuffer(reader, self._read_size)
    # Mirror the connection's response-decoding configuration.
    if connection.decode_responses:
        self.encoding = connection.encoding
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def on_disconnect(self):
    """Called when the stream disconnects."""
    # Drop the stream reference and tear down the buffer, if any.
    self._stream = None
    buf = self._buffer
    if buf is not None:
        buf.close()
        self._buffer = None
    self.encoding = None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
async def can_read(self):
    """See if there's data that can be read."""
    connected = self._reader and self._writer
    if not connected:
        await self.connect()
    return self._parser.can_read()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def pack_commands(self, commands):
    "Pack multiple commands into the Redis protocol"
    # Accumulates encoded chunks and flushes them into joined buffers of
    # roughly 6000 bytes to bound the size of each socket write.
    output = []
    pieces = []
    buffer_length = 0
    for cmd in commands:
        for chunk in self.pack_command(*cmd):
            pieces.append(chunk)
            buffer_length += len(chunk)
        # NOTE(review): the flush check appears to run once per command
        # (after the inner loop) in the original formatting — confirm.
        if buffer_length > 6000:
            output.append(SYM_EMPTY.join(pieces))
            buffer_length = 0
            pieces = []
    # Flush any trailing chunks that never crossed the threshold.
    if pieces:
        output.append(SYM_EMPTY.join(pieces))
    return output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: async def on_connect(self): """ Initialize the connection, authenticate and select a database and send READONLY if it is set during object initialization. """
if self.db: warnings.warn('SELECT DB is not allowed in cluster mode') self.db = '' await super(ClusterConnection, self).on_connect() if self.readonly: await self.send_command('READONLY') if nativestr(await self.read_response()) != 'OK': raise ConnectionError('READONLY command failed')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_schema(self, schema): """ Merge in a JSON schema. This can be a ``dict`` or another ``SchemaBuilder`` :param schema: a JSON Schema .. note:: There is no schema validation. If you pass in a bad schema, you might get back a bad schema. """
# Normalise the input into a plain dict schema first.
if isinstance(schema, SchemaBuilder):
    builder_uri = schema.schema_uri
    schema = schema.to_schema()
    # A builder without an explicit URI should not impose a '$schema' key.
    if builder_uri is None:
        del schema['$schema']
elif isinstance(schema, SchemaNode):
    schema = schema.to_schema()
# Absorb any '$schema' declaration into this builder, then strip it from
# the copy we hand to the root node.
if '$schema' in schema:
    if not self.schema_uri:
        self.schema_uri = schema['$schema']
    schema = {key: value for key, value in schema.items() if key != '$schema'}
self._root_node.add_schema(schema)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_schema(self): """ Generate a schema based on previous inputs. :rtype: ``dict`` """
schema = self._base_schema() schema.update(self._root_node.to_schema()) return schema
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_json(self, *args, **kwargs): """ Generate a schema and convert it directly to serialized JSON. :rtype: ``str`` """
return json.dumps(self.to_schema(), *args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_long_docs(*filenames): """Build rst description from a set of files."""
docs = [] for filename in filenames: with open(filename, 'r') as f: docs.append(f.read()) return "\n\n".join(docs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_schema(self, schema): """ Merges in an existing schema. arguments: * `schema` (required - `dict` or `SchemaNode`): an existing JSON Schema to merge. """
# serialize instances of SchemaNode before parsing if isinstance(schema, SchemaNode): schema = schema.to_schema() for subschema in self._get_subschemas(schema): # delegate to SchemaType object schema_generator = self._get_generator_for_schema(subschema) schema_generator.add_schema(subschema) # return self for easy method chaining return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_object(self, obj): """ Modify the schema to accommodate an object. arguments: * `obj` (required - `dict`): a JSON object to use in generating the schema. """
# delegate to SchemaType object schema_generator = self._get_generator_for_object(obj) schema_generator.add_object(obj) # return self for easy method chaining return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_schema(self): """ Convert the current schema to a `dict`. """
types = set()
generated_schemas = []
for schema_generator in self._schema_generators:
    generated_schema = schema_generator.to_schema()
    # Bare type-only schemas are pooled so they can be merged into a
    # single {'type': [...]} entry below.
    if len(generated_schema) == 1 and 'type' in generated_schema:
        types.add(generated_schema['type'])
    else:
        generated_schemas.append(generated_schema)
if types:
    if len(types) == 1:
        # Unwrap a lone type to a scalar, e.g. 'string' not ['string'].
        (types,) = types
    else:
        # Sort the type list for deterministic output.
        types = sorted(types)
    generated_schemas = [{'type': types}] + generated_schemas
if len(generated_schemas) == 1:
    (result_schema,) = generated_schemas
elif generated_schemas:
    # Multiple distinct schemas are combined under anyOf.
    result_schema = {'anyOf': generated_schemas}
else:
    result_schema = {}
return result_schema
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def hsts_header(self): """Returns the proper HSTS policy."""
hsts_policy = 'max-age={0}'.format(self.hsts_age) if self.hsts_include_subdomains: hsts_policy += '; includeSubDomains' return hsts_policy
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def skip(self): """Checks the skip list."""
# Should we skip? if self.skip_list and isinstance(self.skip_list, list): for skip in self.skip_list: if request.path.startswith('/{0}'.format(skip)): return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def redirect_to_ssl(self): """Redirect incoming requests to HTTPS."""
# Should we redirect? criteria = [ request.is_secure, current_app.debug, current_app.testing, request.headers.get('X-Forwarded-Proto', 'http') == 'https' ] if not any(criteria) and not self.skip: if request.url.startswith('http://'): url = request.url.replace('http://', 'https://', 1) code = 302 if self.permanent: code = 301 r = redirect(url, code=code) return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_hue(self, hue, duration=0, rapid=False): """ hue to set duration in ms"""
# Replace only the hue component, keeping saturation, brightness and kelvin.
color = self.get_color()
new_color = (hue, color[1], color[2], color[3])
# The original ``try/except WorkflowException: raise`` wrapper added
# nothing (it only re-raised), so the calls are made directly;
# WorkflowException still propagates to the caller unchanged.
if rapid:
    self.fire_and_forget(LightSetColor, {"color": new_color, "duration": duration}, num_repeats=1)
else:
    self.req_with_ack(LightSetColor, {"color": new_color, "duration": duration})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_saturation(self, saturation, duration=0, rapid=False): """ saturation to set duration in ms"""
# Replace only the saturation component, keeping hue, brightness and kelvin.
color = self.get_color()
new_color = (color[0], saturation, color[2], color[3])
# The original ``try/except WorkflowException: raise`` wrapper added
# nothing (it only re-raised), so the calls are made directly;
# WorkflowException still propagates to the caller unchanged.
if rapid:
    self.fire_and_forget(LightSetColor, {"color": new_color, "duration": duration}, num_repeats=1)
else:
    self.req_with_ack(LightSetColor, {"color": new_color, "duration": duration})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_brightness(self, brightness, duration=0, rapid=False): """ brightness to set duration in ms"""
# Replace only the brightness component, keeping hue, saturation and kelvin.
color = self.get_color()
new_color = (color[0], color[1], brightness, color[3])
# The original ``try/except WorkflowException: raise`` wrapper added
# nothing (it only re-raised), so the calls are made directly;
# WorkflowException still propagates to the caller unchanged.
if rapid:
    self.fire_and_forget(LightSetColor, {"color": new_color, "duration": duration}, num_repeats=1)
else:
    self.req_with_ack(LightSetColor, {"color": new_color, "duration": duration})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _rotate(edges, step): """ Simulate the cube rotation by updating four edges. """
step = Step(step) result = set() movement = { "U": "RFLB", "D": "LFRB", "R": "FUBD", "L": "FDBU", "F": "URDL", "B": "ULDR", }[step.face] movement = { movement[i]: movement[(i + step.is_clockwise + (-1 * step.is_counter_clockwise) + (2 * step.is_180)) % 4] for i in range(4) } for edge in edges: if step.face not in edge: result.add(edge.copy()) else: k = (set(edge.facings.keys()) - {step.face}).pop() new_edge = Edge(**{ step.face: edge[step.face], movement[k]: edge[k], }) result.add(new_edge) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cross_successors(state, last_action=None): """ Successors function for solving the cross. """
centres, edges = state acts = sum([ [s, s.inverse(), s * 2] for s in map(Step, "RUFDRB".replace(last_action.face if last_action else "", "", 1)) ], []) for step in acts: yield step, (centres, CrossSolver._rotate(edges, step))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cross_goal(state): """ The goal function for cross solving search. """
centres, edges = state
for edge in edges:
    # Every cross edge must sit on the D face...
    if "D" not in edge.facings:
        return False
    # ...with its D sticker matching the D centre colour...
    if edge["D"] != centres["D"]["D"]:
        return False
    # ...and its side sticker matching that side's centre colour.
    k = "".join(edge.facings.keys()).replace("D", "")
    if edge[k] != centres[k][k]:
        return False
return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve(self): """ Solve the cross. """
result = Formula(path_actions(a_star_search( ({f: self.cube[f] for f in "LUFDRB"}, self.cube.select_type("edge") & self.cube.has_colour(self.cube["D"].colour)), self.cross_successors, self.cross_state_value, self.cross_goal, ))) self.cube(result) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_solved(self): """ Check if the cross of Cube is solved. """
return self.cross_goal(({f: self.cube[f] for f in "LUFDRB"}, self.cube.select_type("edge") & self.cube.has_colour(self.cube["D"].colour)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def recognise(self): """ Recognise the PLL case of Cube. """
result = "" for side in "LFRB": for square in self.cube.get_face(side)[0]: for _side in "LFRB": if square.colour == self.cube[_side].colour: result += _side break return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve(self): """ Solve PLL of Cube. """
if not isinstance(self.cube, Cube):
    raise ValueError("Use Solver.feed(cube) to feed the cube to solver.")
# Try all four U-layer orientations (whole-cube y rotations) until the
# recognised permutation matches a known PLL algorithm.
for i in range(4):
    rec_id = self.recognise()
    if rec_id in algo_dict:
        self.cube(algo_dict[rec_id])
        # Prepend the y rotations that were actually applied first.
        return Formula((Step("y") * i) or []) + algo_dict[rec_id]
    self.cube(Step("y"))
raise ValueError("Invalid cube.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_solved(self): """ Check if Cube is solved. """
for side in "LUFDRB":
    # The sticker the centre of this face shows; every square must match it.
    sample = self.cube[side].facings[side]
    # sum(..., []) flattens the 3x3 face into a single list of squares.
    for square in sum(self.cube.get_face(side), []):
        if square != sample:
            return False
return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def recognise(self): """ Recognise which is Cube's OLL case. """
if not isinstance(self.cube, Cube): raise ValueError("Use Solver.feed(cube) to feed the cube to solver.") result = "" for face in "LFRB": for square in self.cube.get_face(face)[0]: result += str(int(square == self.cube["U"]["U"])) if result not in algo_dict: raise ValueError("Invalid Cube, probably didn't solve F2L, or wrong input value.\nUse Solver.feed(cube) to reset the cube.") self.case = result return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve(self): """ Solve the OLL. Returns an Formula. """
if not isinstance(self.cube, Cube): raise ValueError("Use Solver.feed(cube) to feed the cube to solver.") self.recognise() self.cube(algo_dict[self.case]) return algo_dict[self.case]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def feed(self, cube, pair): """ Feed Cube to the solver. """
self.cube = cube
# Normalise a reversed pair name (e.g. "RF") to its canonical form ("FR").
canonical = ("FR", "RB", "BL", "LF")
reversed_forms = ("RF", "BR", "LB", "FL")
if pair not in canonical:
    pair = canonical[reversed_forms.index(pair)]
self.pair = pair
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def estimated_position(self): """ Get the estimated cubie of solved pair. """
corner = {"D":self.cube["D"]["D"]} edge = {} for cubie in (corner, edge): for face in self.pair: cubie.update({face:self.cube[face][face]}) return (Corner(**corner), Edge(**edge))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_slot(self): """ Get the slot position of this pair. """
corner, edge = self.get_pair() corner_slot, edge_slot = corner.location.replace("D", "", 1), edge.location if "U" not in corner_slot and corner_slot not in ["FR", "RB", "BL", "LF"]: corner_slot = ["FR", "RB", "BL", "LF"][["RF", "BR", "LB", "FL"].index(corner_slot)] if "U" not in edge_slot and edge_slot not in ["FR", "RB", "BL", "LF"]: edge_slot = ["FR", "RB", "BL", "LF"][["RF", "BR", "LB", "FL"].index(edge_slot)] if "U" in corner_slot and "U" in edge_slot: return ("SLOTFREE", (None, None), (corner, edge)) if "U" in corner_slot: return ("CSLOTFREE", (None, edge_slot), (corner, edge)) if "U" in edge_slot: return ("ESLOTFREE", (corner_slot, None), (corner, edge)) if corner_slot not in [edge_slot, edge_slot[::-1]]: return ("DIFFSLOT", (corner_slot, edge_slot), (corner, edge)) if (corner, edge) == self.estimated_position(): return ("SOLVED", (corner_slot, edge_slot), (corner, edge)) return ("WRONGSLOT", (corner_slot, edge_slot), (corner, edge))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def combining_goal(state): """ Check if two Cubies are combined on the U face. """
((corner, edge), (L, U, F, D, R, B)) = state if "U" not in corner or "U" not in edge: return False if set(edge).issubset(set(corner)): return True elif set(edge.facings.keys()).issubset(set(corner.facings.keys())): return False opposite = {"L":"R", "R":"L", "F":"B", "B":"F"} edge_facings = list(edge) for i, (face, square) in enumerate(edge_facings): if face == "U": if square != corner[opposite[edge_facings[(i+1)%2][0]]]: return False else: if square != corner["U"]: return False return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _rotate(pair, step): """ Simulate the cube rotation by updating the pair. """
step = Step(step) movement = { "U": "RFLB", "D": "LFRB", "R": "FUBD", "L": "FDBU", "F": "URDL", "B": "ULDR", }[step.face] movement = { movement[i]: movement[(i + step.is_clockwise + (-1 * step.is_counter_clockwise) + (2 * step.is_180)) % 4] for i in range(4) } for cubie in pair: if step.face not in cubie: if cubie.type == "edge": result_edge = cubie.copy() else: result_corner = cubie.copy() else: result = {} for face, square in cubie: if face not in movement: result[face] = square else: result[movement[face]] = square if len(result) == 2: result_edge = Edge(**result) else: result_corner = Corner(**result) return (result_corner, result_edge)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def combining_successors(state, last_action=()): """ Successors function for finding path of combining F2L pair. """
((corner, edge), (L, U, F, D, R, B)) = state U_turns = [Formula("U"), Formula("U'"), Formula("U2")] if len(last_action) != 1 else [] R_turns = [Formula("R U R'"), Formula("R U' R'"), Formula("R U2 R'")] if "R" not in last_action else [] F_turns = [Formula("F' U F"), Formula("F' U' F"), Formula("F' U2 F")] if "F" not in last_action else [] for act in (U_turns + R_turns + F_turns): new = (corner, edge) for q in act: new = F2LPairSolver._rotate(new, q) yield act, (new, (L, U, F, D, R, B))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def combining_search(self): """ Searching the path for combining the pair. """
start = ( self.get_pair(), ( self.cube["L"], self.cube["U"], self.cube["F"], self.cube["D"], self.cube["R"], self.cube["B"], ), ) return sum(path_actions(a_star_search(start, self.combining_successors, lambda x: len(x), self.combining_goal)), Formula())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def combining_setup(self): """ Setup for some special F2L cases. """
(slot_type, (corner_slot, edge_slot), (corner, edge)) = self.get_slot() cycle = ["FR", "RB", "BL", "LF"] if slot_type == "SLOTFREE": return ("FR", Formula(Step("y") * cycle.index(self.pair) or [])) elif slot_type == "CSLOTFREE": return (cycle[-(cycle.index(edge_slot) - cycle.index(self.pair))], Formula(Step("y") * cycle.index(edge_slot) or [])) elif slot_type in ("ESLOTFREE", "WRONGSLOT"): return (cycle[-(cycle.index(corner_slot) - cycle.index(self.pair))], Formula(Step("y") * cycle.index(corner_slot) or [])) elif slot_type == "DIFFSLOT": if corner_slot != self.pair: corner_slot, edge_slot = edge_slot, corner_slot result = Formula(Step("y") * cycle.index(edge_slot) or []) result += Formula("R U R'") result += Formula(Step("y'") * cycle.index(edge_slot) or []) result += Formula(Step("y") * cycle.index(corner_slot) or []) if result[-1].face == "y" and result[-2].face == "y": result[-2] += result[-1] del result[-1] return (cycle[-(cycle.index(corner_slot) - cycle.index(self.pair))], result) else: return (cycle[-cycle.index(self.pair)], Formula())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def combine(self): """ Combine the pair. """
self.pair, setup = self.combining_setup() self.cube(setup) actual = self.combining_search() self.cube(actual) return setup + actual
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def solve(self): """ Solve the pair. """
cycle = ["FR", "RB", "BL", "LF"] combine = self.combine() put = Formula(Step("y") * cycle.index(self.pair) or []) self.cube(put) self.pair = "FR" estimated = self.estimated_position() for U_act in [Formula(), Formula("U"), Formula("U2"), Formula("U'")]: self.cube(U_act) for put_act in [Formula("R U R'"), Formula("R U' R'"), Formula("R U2 R'"), Formula("F' U F"), Formula("F' U' F"), Formula("F' U2 F")]: self.cube(put_act) if self.get_pair() == estimated: return combine + put + U_act + put_act self.cube(put_act.reverse()) self.cube(U_act.reverse())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_solved(self): """ Check if Cube's F2L is solved. """
if self.cube.D == [[Square(self.cube["D"].colour)] * 3] * 3: for face in "LFRB": if self.cube.get_face(face)[1:] != [[Square(self.cube[face].colour)] * 3] * 2: return False return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def authorization_code_pkce(self, client_id, code_verifier, code, redirect_uri, grant_type='authorization_code'): """Authorization code pkce grant This is the OAuth 2.0 grant that mobile apps utilize in order to access an API. Use this endpoint to exchange an Authorization Code for a Token. Args: grant_type (str): Denotes the flow you're using. For authorization code pkce use authorization_code client_id (str): your application's client Id code_verifier (str): Cryptographically random key that was used to generate the code_challenge passed to /authorize. code (str): The Authorization Code received from the /authorize Calls redirect_uri (str, optional): This is required only if it was set at the GET /authorize endpoint. The values must match Returns: access_token, id_token """
return self.post( 'https://{}/oauth/token'.format(self.domain), data={ 'client_id': client_id, 'code_verifier': code_verifier, 'code': code, 'grant_type': grant_type, 'redirect_uri': redirect_uri, }, headers={'Content-Type': 'application/json'} )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def client_credentials(self, client_id, client_secret, audience, grant_type='client_credentials'): """Client credentials grant This is the OAuth 2.0 grant that server processes utilize in order to access an API. Use this endpoint to directly request an access_token by using the Application Credentials (a Client Id and a Client Secret). Args: grant_type (str): Denotes the flow you're using. For client credentials use client_credentials client_id (str): your application's client Id client_secret (str): your application's client Secret audience (str): The unique identifier of the target API you want to access. Returns: access_token """
return self.post( 'https://{}/oauth/token'.format(self.domain), data={ 'client_id': client_id, 'client_secret': client_secret, 'audience': audience, 'grant_type': grant_type, }, headers={'Content-Type': 'application/json'} )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, id, fields=None, include_fields=True): """Retrieve connection by id. Args: id (str): Id of the connection to get. fields (list of str, optional): A list of fields to include or exclude from the result (depending on include_fields). Empty to retrieve all fields. include_fields (bool, optional): True if the fields specified are to be included in the result, False otherwise. See: https://auth0.com/docs/api/management/v2#!/Connections/get_connections_by_id Returns: A connection object. """
params = {'fields': fields and ','.join(fields) or None, 'include_fields': str(include_fields).lower()} return self.client.get(self._url(id), params=params)