code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def _string2Duration(text):
    """Convert a simple "<float><type>" string (e.g. "3day", "1.5hour") to a Duration.

    An empty string or "zero" yields the ZERO duration. A missing amount
    defaults to 1 (so "day" means "1day").

    :param text: amount followed by an interval name (singular form)
    :return: Duration instance
    :raises: logs an error for unrecognized interval names
    """
    if text == "" or text == "zero":
        return ZERO

    amount, interval = re.match(r"([\d\.]*)(.*)", text).groups()
    # The pattern accepts a decimal point, so parse as float; demote to int
    # when whole so integer inputs keep producing integer milli values.
    if amount:
        amount = float(amount)
        if amount.is_integer():
            amount = int(amount)
    else:
        amount = 1

    if MILLI_VALUES[interval] is None:
        from mo_logs import Log
        Log.error(
            "{{interval|quote}} in {{text|quote}} is not a recognized duration type (did you use the plural form by mistake?",
            interval=interval,
            text=text
        )

    output = Duration(0)
    if MONTH_VALUES[interval] == 0:
        # Fixed-length interval (second/minute/hour/day/week): pure millis.
        output.milli = amount * MILLI_VALUES[interval]
    else:
        # Calendar interval (month/quarter/year): track months separately,
        # using the average month length for the milli estimate.
        output.milli = amount * MONTH_VALUES[interval] * MILLI_VALUES.month
        output.month = amount * MONTH_VALUES[interval]

    return output
CONVERT SIMPLE <float><type> TO A DURATION OBJECT
Below is the the instruction that describes the task: ### Input: CONVERT SIMPLE <float><type> TO A DURATION OBJECT ### Response: def _string2Duration(text): """ CONVERT SIMPLE <float><type> TO A DURATION OBJECT """ if text == "" or text == "zero": return ZERO amount, interval = re.match(r"([\d\.]*)(.*)", text).groups() amount = int(amount) if amount else 1 if MILLI_VALUES[interval] == None: from mo_logs import Log Log.error( "{{interval|quote}} in {{text|quote}} is not a recognized duration type (did you use the pural form by mistake?", interval=interval, text=text ) output = Duration(0) if MONTH_VALUES[interval] == 0: output.milli = amount * MILLI_VALUES[interval] else: output.milli = amount * MONTH_VALUES[interval] * MILLI_VALUES.month output.month = amount * MONTH_VALUES[interval] return output
def pairs(self, strand, cutoff=0.001, temp=37.0, pseudo=False, material=None,
          dangles='some', sodium=1.0, magnesium=0.0):
    '''Compute the pair probabilities for an ordered complex of strands.
    Runs the \'pairs\' command.

    :param strand: Strand on which to run pairs. Strands must be either
                   coral.DNA or coral.RNA).
    :type strand: list
    :param cutoff: Only probabilities above this cutoff appear in the
                   output.
    :type cutoff: float
    :param temp: Temperature setting for the computation. Negative values
                 are not allowed.
    :type temp: float
    :param pseudo: Enable pseudoknots.
    :type pseudo: bool
    :param material: The material setting to use in the computation. If set
                     to None (the default), the material type is inferred
                     from the strands. Other settings available: 'dna' for
                     DNA parameters, 'rna' for RNA (1995) parameters, and
                     'rna1999' for the RNA 1999 parameters.
    :type material: str
    :param dangles: How to treat dangles in the computation. From the
                    user guide: For \'none\': Dangle energies are ignored.
                    For \'some\': \'A dangle energy is incorporated for
                    each unpaired base flanking a duplex\'. For 'all': all
                    dangle energy is considered.
    :type dangles: str
    :param sodium: Sodium concentration in solution (molar), only applies
                   to DNA.
    :type sodium: float
    :param magnesium: Magnesium concentration in solution (molar), only
                      applies to DNA.
    :type magnesium: float
    :returns: The probability matrix, where the (i, j)th entry is the
              probability that base i is bound to base j. The matrix is
              augmented (it's N+1 by N+1, where N is the number of bases
              in the sequence) with an (N+1)th column containing the
              probability that each base is unpaired.
    :rtype: numpy.array
    '''
    # Set the material (will be used to set command material flag)
    material = self._set_material(strand, material)
    # Set up command flags
    cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                   magnesium, multi=False)
    # Set up the input file and run the command. Note: no STDOUT
    lines = [str(strand)]
    self._run('pairs', cmd_args, lines)
    # Read the output from file. Raw string avoids the invalid '\d'
    # escape warning while keeping the same regex semantics.
    ppairs = self._read_tempfile('pairs.ppairs')
    data = re.search(r'\n\n\d*\n(.*)', ppairs, flags=re.DOTALL).group(1)
    N = len(strand)
    data_lines = [line.split('\t') for line in data.split('\n') if line]
    prob_matrix = self._pairs_to_np(data_lines, N)
    return prob_matrix
Compute the pair probabilities for an ordered complex of strands. Runs the \'pairs\' command. :param strand: Strand on which to run pairs. Strands must be either coral.DNA or coral.RNA). :type strand: list :param cutoff: Only probabilities above this cutoff appear in the output. :type cutoff: float :param temp: Temperature setting for the computation. Negative values are not allowed. :type temp: float :param pseudo: Enable pseudoknots. :type pseudo: bool :param material: The material setting to use in the computation. If set to None (the default), the material type is inferred from the strands. Other settings available: 'dna' for DNA parameters, 'rna' for RNA (1995) parameters, and 'rna1999' for the RNA 1999 parameters. :type material: str :param dangles: How to treat dangles in the computation. From the user guide: For \'none\': Dangle energies are ignored. For \'some\': \'A dangle energy is incorporated for each unpaired base flanking a duplex\'. For 'all': all dangle energy is considered. :type dangles: str :param sodium: Sodium concentration in solution (molar), only applies to DNA. :type sodium: float :param magnesium: Magnesium concentration in solution (molar), only applies to DNA> :type magnesium: float :returns: The probability matrix, where the (i, j)th entry is the probability that base i is bound to base j. The matrix is augmented (it's N+1 by N+1, where N is the number of bases in the sequence) with an (N+1)th column containing the probability that each base is unpaired. :rtype: numpy.array
Below is the the instruction that describes the task: ### Input: Compute the pair probabilities for an ordered complex of strands. Runs the \'pairs\' command. :param strand: Strand on which to run pairs. Strands must be either coral.DNA or coral.RNA). :type strand: list :param cutoff: Only probabilities above this cutoff appear in the output. :type cutoff: float :param temp: Temperature setting for the computation. Negative values are not allowed. :type temp: float :param pseudo: Enable pseudoknots. :type pseudo: bool :param material: The material setting to use in the computation. If set to None (the default), the material type is inferred from the strands. Other settings available: 'dna' for DNA parameters, 'rna' for RNA (1995) parameters, and 'rna1999' for the RNA 1999 parameters. :type material: str :param dangles: How to treat dangles in the computation. From the user guide: For \'none\': Dangle energies are ignored. For \'some\': \'A dangle energy is incorporated for each unpaired base flanking a duplex\'. For 'all': all dangle energy is considered. :type dangles: str :param sodium: Sodium concentration in solution (molar), only applies to DNA. :type sodium: float :param magnesium: Magnesium concentration in solution (molar), only applies to DNA> :type magnesium: float :returns: The probability matrix, where the (i, j)th entry is the probability that base i is bound to base j. The matrix is augmented (it's N+1 by N+1, where N is the number of bases in the sequence) with an (N+1)th column containing the probability that each base is unpaired. :rtype: numpy.array ### Response: def pairs(self, strand, cutoff=0.001, temp=37.0, pseudo=False, material=None, dangles='some', sodium=1.0, magnesium=0.0): '''Compute the pair probabilities for an ordered complex of strands. Runs the \'pairs\' command. :param strand: Strand on which to run pairs. Strands must be either coral.DNA or coral.RNA). 
:type strand: list :param cutoff: Only probabilities above this cutoff appear in the output. :type cutoff: float :param temp: Temperature setting for the computation. Negative values are not allowed. :type temp: float :param pseudo: Enable pseudoknots. :type pseudo: bool :param material: The material setting to use in the computation. If set to None (the default), the material type is inferred from the strands. Other settings available: 'dna' for DNA parameters, 'rna' for RNA (1995) parameters, and 'rna1999' for the RNA 1999 parameters. :type material: str :param dangles: How to treat dangles in the computation. From the user guide: For \'none\': Dangle energies are ignored. For \'some\': \'A dangle energy is incorporated for each unpaired base flanking a duplex\'. For 'all': all dangle energy is considered. :type dangles: str :param sodium: Sodium concentration in solution (molar), only applies to DNA. :type sodium: float :param magnesium: Magnesium concentration in solution (molar), only applies to DNA> :type magnesium: float :returns: The probability matrix, where the (i, j)th entry is the probability that base i is bound to base j. The matrix is augmented (it's N+1 by N+1, where N is the number of bases in the sequence) with an (N+1)th column containing the probability that each base is unpaired. :rtype: numpy.array ''' # Set the material (will be used to set command material flag) material = self._set_material(strand, material) # Set up command flags cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium, magnesium, multi=False) # Set up the input file and run the command. Note: no STDOUT lines = [str(strand)] self._run('pairs', cmd_args, lines) # Read the output from file ppairs = self._read_tempfile('pairs.ppairs') data = re.search('\n\n\d*\n(.*)', ppairs, flags=re.DOTALL).group(1) N = len(strand) data_lines = [line.split('\t') for line in data.split('\n') if line] prob_matrix = self._pairs_to_np(data_lines, N) return prob_matrix
def translate_request(request):
    """Translate a sanic-style request into a WSGI-compatible environ dict.

    Standard CGI/WSGI keys are populated from the request's method, URL,
    version and headers; the body is exposed through an awaitable payload
    wrapper on ``wsgi.input``.
    """
    class AwaitablePayload(object):
        # Wraps the raw body bytes so handlers can ``await read()``.
        def __init__(self, payload):
            self.payload = payload or b''

        async def read(self, length=None):
            if length is None:
                chunk, self.payload = self.payload, b''
            else:
                chunk, self.payload = self.payload[:length], self.payload[length:]
            return chunk

    url_parts = urlsplit(request.url)

    environ = {
        'wsgi.input': AwaitablePayload(request.body),
        'wsgi.errors': sys.stderr,
        'wsgi.version': (1, 0),
        'wsgi.async': True,
        'wsgi.multithread': False,
        'wsgi.multiprocess': False,
        'wsgi.run_once': False,
        'SERVER_SOFTWARE': 'sanic',
        'REQUEST_METHOD': request.method,
        'QUERY_STRING': url_parts.query or '',
        'RAW_URI': request.url,
        'SERVER_PROTOCOL': 'HTTP/' + request.version,
        'REMOTE_ADDR': '127.0.0.1',
        'REMOTE_PORT': '0',
        'SERVER_NAME': 'sanic',
        'SERVER_PORT': '0',
        'sanic.request': request,
    }

    for name, value in request.headers.items():
        name = name.upper()
        # Content-Type / Content-Length get dedicated CGI keys, not HTTP_*.
        if name == 'CONTENT-TYPE':
            environ['CONTENT_TYPE'] = value
        elif name == 'CONTENT-LENGTH':
            environ['CONTENT_LENGTH'] = value
        else:
            key = 'HTTP_%s' % name.replace('-', '_')
            if key in environ:
                # Repeated headers are folded into one comma-joined value.
                value = '%s,%s' % (environ[key], value)
            environ[key] = value

    environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
    environ['PATH_INFO'] = url_parts.path
    environ['SCRIPT_NAME'] = ''
    return environ
This function takes the arguments passed to the request handler and uses them to generate a WSGI compatible environ dictionary.
Below is the the instruction that describes the task: ### Input: This function takes the arguments passed to the request handler and uses them to generate a WSGI compatible environ dictionary. ### Response: def translate_request(request): """This function takes the arguments passed to the request handler and uses them to generate a WSGI compatible environ dictionary. """ class AwaitablePayload(object): def __init__(self, payload): self.payload = payload or b'' async def read(self, length=None): if length is None: r = self.payload self.payload = b'' else: r = self.payload[:length] self.payload = self.payload[length:] return r uri_parts = urlsplit(request.url) environ = { 'wsgi.input': AwaitablePayload(request.body), 'wsgi.errors': sys.stderr, 'wsgi.version': (1, 0), 'wsgi.async': True, 'wsgi.multithread': False, 'wsgi.multiprocess': False, 'wsgi.run_once': False, 'SERVER_SOFTWARE': 'sanic', 'REQUEST_METHOD': request.method, 'QUERY_STRING': uri_parts.query or '', 'RAW_URI': request.url, 'SERVER_PROTOCOL': 'HTTP/' + request.version, 'REMOTE_ADDR': '127.0.0.1', 'REMOTE_PORT': '0', 'SERVER_NAME': 'sanic', 'SERVER_PORT': '0', 'sanic.request': request } for hdr_name, hdr_value in request.headers.items(): hdr_name = hdr_name.upper() if hdr_name == 'CONTENT-TYPE': environ['CONTENT_TYPE'] = hdr_value continue elif hdr_name == 'CONTENT-LENGTH': environ['CONTENT_LENGTH'] = hdr_value continue key = 'HTTP_%s' % hdr_name.replace('-', '_') if key in environ: hdr_value = '%s,%s' % (environ[key], hdr_value) environ[key] = hdr_value environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http') path_info = uri_parts.path environ['PATH_INFO'] = path_info environ['SCRIPT_NAME'] = '' return environ
def dump_handler_config_data(self):
    ''' Return capture handler configuration data.

    Return a list of capture handler configuration data of the form:

    .. code-block:: none

        [{
            'handler': <handler configuration dictionary>,

            'log_file_path': <Path to the current log file that the logger
                is writing. Note that if rotation is used it\'s possible
                this data will be stale eventually.>,

            'conn_type': <The string defining the connection type of the
                logger.>,

            'address': <The list containing address info that the logger is
                using for its connection.>
        }, ...]
    '''
    # Runtime-only keys that should not appear in the dumped config.
    ignored_keys = ['logger', 'log_rot_time', 'reads', 'data_read']
    config_data = []
    for h in self.capture_handlers:
        config_data.append({
            # .items() instead of the Python-2-only .iteritems(), which
            # raises AttributeError on Python 3.
            'handler': {k: v for k, v in h.items() if k not in ignored_keys},
            'log_file_path': h['logger']._stream.name,
            'conn_type': self.conn_type,
            'address': self.address,
        })
    return config_data
Return capture handler configuration data. Return a dictionary of capture handler configuration data of the form: .. code-block:: none [{ 'handler': <handler configuration dictionary>, 'log_file_path': <Path to the current log file that the logger is writing. Note that if rotation is used it\'s possible this data will be stale eventually.>, 'conn_type': <The string defining the connection type of the logger.>, 'address': <The list containing address info that the logger is using for its connection.> }, ...]
Below is the the instruction that describes the task: ### Input: Return capture handler configuration data. Return a dictionary of capture handler configuration data of the form: .. code-block:: none [{ 'handler': <handler configuration dictionary>, 'log_file_path': <Path to the current log file that the logger is writing. Note that if rotation is used it\'s possible this data will be stale eventually.>, 'conn_type': <The string defining the connection type of the logger.>, 'address': <The list containing address info that the logger is using for its connection.> }, ...] ### Response: def dump_handler_config_data(self): ''' Return capture handler configuration data. Return a dictionary of capture handler configuration data of the form: .. code-block:: none [{ 'handler': <handler configuration dictionary>, 'log_file_path': <Path to the current log file that the logger is writing. Note that if rotation is used it\'s possible this data will be stale eventually.>, 'conn_type': <The string defining the connection type of the logger.>, 'address': <The list containing address info that the logger is using for its connection.> }, ...] ''' ignored_keys = ['logger', 'log_rot_time', 'reads', 'data_read'] config_data = [] for h in self.capture_handlers: config_data.append({ 'handler': { k:v for k, v in h.iteritems() if k not in ignored_keys }, 'log_file_path': h['logger']._stream.name, 'conn_type': self.conn_type, 'address': self.address, }) return config_data
def validate(self, path):
    """Return True only if ``path`` passes every configured filter set."""
    # Hidden files/folders ('.'-prefixed basenames) are always rejected.
    if os.path.basename(path).startswith('.'):
        return False

    # Path depth must fall within the configured min/max levels.
    if not self.check_level(path):
        return False

    # Per-level filters, when configured, must all pass.
    if self.filters and not self._level_filters(path):
        return False

    # Inclusion/exclusion matching is case-insensitive substring matching;
    # entries are coerced to str in case integer filters were supplied.
    lowered = path.lower()
    if self.to_exclude:
        if any(str(entry).lower() in lowered for entry in self.to_exclude):
            return False
    if self.to_include:
        if not any(str(entry).lower() in lowered for entry in self.to_include):
            return False

    return True
Run path against filter sets and return True if all pass
Below is the the instruction that describes the task: ### Input: Run path against filter sets and return True if all pass ### Response: def validate(self, path): """Run path against filter sets and return True if all pass""" # Exclude hidden files and folders with '.' prefix if os.path.basename(path).startswith('.'): return False # Check that current path level is more than min path and less than max path if not self.check_level(path): return False if self.filters: if not self._level_filters(path): return False # Force include and exclude iterations to be strings in case of integer filters # Handle exclusions if self.to_exclude: if any(str(ex).lower() in path.lower() for ex in self.to_exclude): return False # Handle inclusions if self.to_include: if not any(str(inc).lower() in path.lower() for inc in self.to_include): return False return True
def gt(self, v, limit=None, offset=None):
    """Return the members of the set whose scores are strictly greater
    than ``v``, optionally paginated with ``limit``/``offset``.
    """
    # A limit without an explicit offset starts from the beginning.
    if limit is not None and offset is None:
        offset = 0
    # '(' prefix makes the lower bound exclusive in Redis range syntax.
    lower_bound = "(%f" % v
    return self.zrangebyscore(lower_bound, self._max_score,
                              start=offset, num=limit)
Returns the list of the members of the set that have scores greater than v.
Below is the the instruction that describes the task: ### Input: Returns the list of the members of the set that have scores greater than v. ### Response: def gt(self, v, limit=None, offset=None): """Returns the list of the members of the set that have scores greater than v. """ if limit is not None and offset is None: offset = 0 return self.zrangebyscore("(%f" % v, self._max_score, start=offset, num=limit)
def pitch_class_to_semitone(pitch_class):
    r'''Convert a pitch class to semitone.

    Parameters
    ----------
    pitch_class : str
        Spelling of a given pitch class, e.g. 'C#', 'Gbb'

    Returns
    -------
    semitone : int
        Semitone value of the pitch class.

    Raises
    ------
    InvalidChordException
        If the pitch class is improperly formed, including an unrecognized
        root letter.
    '''
    semitone = 0
    for idx, char in enumerate(pitch_class):
        if char == '#' and idx > 0:
            semitone += 1
        elif char == 'b' and idx > 0:
            semitone -= 1
        elif idx == 0:
            semitone = PITCH_CLASSES.get(char)
            # An unknown root letter previously slipped through as None and
            # caused a TypeError later; fail with the intended exception.
            if semitone is None:
                raise InvalidChordException(
                    "Pitch class improperly formed: %s" % pitch_class)
        else:
            raise InvalidChordException(
                "Pitch class improperly formed: %s" % pitch_class)
    return semitone % 12
r'''Convert a pitch class to semitone. Parameters ---------- pitch_class : str Spelling of a given pitch class, e.g. 'C#', 'Gbb' Returns ------- semitone : int Semitone value of the pitch class.
Below is the the instruction that describes the task: ### Input: r'''Convert a pitch class to semitone. Parameters ---------- pitch_class : str Spelling of a given pitch class, e.g. 'C#', 'Gbb' Returns ------- semitone : int Semitone value of the pitch class. ### Response: def pitch_class_to_semitone(pitch_class): r'''Convert a pitch class to semitone. Parameters ---------- pitch_class : str Spelling of a given pitch class, e.g. 'C#', 'Gbb' Returns ------- semitone : int Semitone value of the pitch class. ''' semitone = 0 for idx, char in enumerate(pitch_class): if char == '#' and idx > 0: semitone += 1 elif char == 'b' and idx > 0: semitone -= 1 elif idx == 0: semitone = PITCH_CLASSES.get(char) else: raise InvalidChordException( "Pitch class improperly formed: %s" % pitch_class) return semitone % 12
def CreateLink(target_path, link_path, override=True):
    '''
    Create a symbolic link at `link_path` pointing to `target_path`.

    :param unicode target_path:
        Link target

    :param unicode link_path:
        Fullpath to link name

    :param bool override:
        If True and `link_path` already exists as a link, that link is overridden.
    '''
    # Both paths must be local; remote/URL-style paths are rejected.
    _AssertIsLocal(target_path)
    _AssertIsLocal(link_path)

    # Replace a pre-existing link (but never a regular file/dir) when asked.
    if override and IsLink(link_path):
        DeleteLink(link_path)

    # Create directories leading up to link
    dirname = os.path.dirname(link_path)
    if dirname:
        CreateDirectory(dirname)

    if sys.platform != 'win32':
        return os.symlink(target_path, link_path)  # @UndefinedVariable
    else:
        #import ntfsutils.junction
        #return ntfsutils.junction.create(target_path, link_path)
        # On Windows, delegate to jaraco.windows for symlink creation.
        import jaraco.windows.filesystem
        return jaraco.windows.filesystem.symlink(target_path, link_path)

    # NOTE(review): everything below is unreachable — both branches above
    # return. It looks like an older Windows implementation kept around;
    # confirm whether it can be deleted.
    from ._easyfs_win32 import CreateSymbolicLink
    try:
        # dw_flags = 1 marks the link as a directory symlink on Windows.
        dw_flags = 0
        if target_path and os.path.isdir(target_path):
            dw_flags = 1
        return CreateSymbolicLink(target_path, link_path, dw_flags)
    except Exception as e:
        reraise(e, 'Creating link "%(link_path)s" pointing to "%(target_path)s"' % locals())
Create a symbolic link at `link_path` pointing to `target_path`. :param unicode target_path: Link target :param unicode link_path: Fullpath to link name :param bool override: If True and `link_path` already exists as a link, that link is overridden.
Below is the the instruction that describes the task: ### Input: Create a symbolic link at `link_path` pointing to `target_path`. :param unicode target_path: Link target :param unicode link_path: Fullpath to link name :param bool override: If True and `link_path` already exists as a link, that link is overridden. ### Response: def CreateLink(target_path, link_path, override=True): ''' Create a symbolic link at `link_path` pointing to `target_path`. :param unicode target_path: Link target :param unicode link_path: Fullpath to link name :param bool override: If True and `link_path` already exists as a link, that link is overridden. ''' _AssertIsLocal(target_path) _AssertIsLocal(link_path) if override and IsLink(link_path): DeleteLink(link_path) # Create directories leading up to link dirname = os.path.dirname(link_path) if dirname: CreateDirectory(dirname) if sys.platform != 'win32': return os.symlink(target_path, link_path) # @UndefinedVariable else: #import ntfsutils.junction #return ntfsutils.junction.create(target_path, link_path) import jaraco.windows.filesystem return jaraco.windows.filesystem.symlink(target_path, link_path) from ._easyfs_win32 import CreateSymbolicLink try: dw_flags = 0 if target_path and os.path.isdir(target_path): dw_flags = 1 return CreateSymbolicLink(target_path, link_path, dw_flags) except Exception as e: reraise(e, 'Creating link "%(link_path)s" pointing to "%(target_path)s"' % locals())
def get_color_stops(self):
    """Return this gradient’s color stops so far.

    :returns:
        A list of ``(offset, red, green, blue, alpha)`` tuples of floats.

    """
    # Ask cairo how many stops have been added to this pattern.
    count = ffi.new('int *')
    _check_status(cairo.cairo_pattern_get_color_stop_count(
        self._pointer, count))

    # One reusable 5-double out-buffer: offset + RGBA per stop.
    values = ffi.new('double[5]')
    result = []
    for index in range(count[0]):
        _check_status(cairo.cairo_pattern_get_color_stop_rgba(
            self._pointer, index,
            values + 0, values + 1, values + 2, values + 3, values + 4))
        result.append(tuple(values))
    return result
Return this gradient’s color stops so far. :returns: A list of ``(offset, red, green, blue, alpha)`` tuples of floats.
Below is the the instruction that describes the task: ### Input: Return this gradient’s color stops so far. :returns: A list of ``(offset, red, green, blue, alpha)`` tuples of floats. ### Response: def get_color_stops(self): """Return this gradient’s color stops so far. :returns: A list of ``(offset, red, green, blue, alpha)`` tuples of floats. """ count = ffi.new('int *') _check_status(cairo.cairo_pattern_get_color_stop_count( self._pointer, count)) stops = [] stop = ffi.new('double[5]') for i in range(count[0]): _check_status(cairo.cairo_pattern_get_color_stop_rgba( self._pointer, i, stop + 0, stop + 1, stop + 2, stop + 3, stop + 4)) stops.append(tuple(stop)) return stops
def copy_fs(
    src_fs,  # type: Union[FS, Text]
    dst_fs,  # type: Union[FS, Text]
    walker=None,  # type: Optional[Walker]
    on_copy=None,  # type: Optional[_OnCopy]
    workers=0,  # type: int
):
    # type: (...) -> None
    """Copy the contents of one filesystem to another.

    Arguments:
        src_fs (FS or str): Source filesystem (URL or instance).
        dst_fs (FS or str): Destination filesystem (URL or instance).
        walker (~fs.walk.Walker, optional): A walker object that will be
            used to scan for files in ``src_fs``. Set this if you only want
            to consider a sub-set of the resources in ``src_fs``.
        on_copy (callable): A function callback called after a single file
            copy is executed. Expected signature is ``(src_fs, src_path,
            dst_fs, dst_path)``.
        workers (int): Use `worker` threads to copy data, or ``0`` (default)
            for a single-threaded copy.

    """
    # Copying a whole filesystem is just a directory copy rooted at "/".
    copy_options = dict(walker=walker, on_copy=on_copy, workers=workers)
    return copy_dir(src_fs, "/", dst_fs, "/", **copy_options)
Copy the contents of one filesystem to another. Arguments: src_fs (FS or str): Source filesystem (URL or instance). dst_fs (FS or str): Destination filesystem (URL or instance). walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable): A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use `worker` threads to copy data, or ``0`` (default) for a single-threaded copy.
Below is the the instruction that describes the task: ### Input: Copy the contents of one filesystem to another. Arguments: src_fs (FS or str): Source filesystem (URL or instance). dst_fs (FS or str): Destination filesystem (URL or instance). walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable): A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use `worker` threads to copy data, or ``0`` (default) for a single-threaded copy. ### Response: def copy_fs( src_fs, # type: Union[FS, Text] dst_fs, # type: Union[FS, Text] walker=None, # type: Optional[Walker] on_copy=None, # type: Optional[_OnCopy] workers=0, # type: int ): # type: (...) -> None """Copy the contents of one filesystem to another. Arguments: src_fs (FS or str): Source filesystem (URL or instance). dst_fs (FS or str): Destination filesystem (URL or instance). walker (~fs.walk.Walker, optional): A walker object that will be used to scan for files in ``src_fs``. Set this if you only want to consider a sub-set of the resources in ``src_fs``. on_copy (callable): A function callback called after a single file copy is executed. Expected signature is ``(src_fs, src_path, dst_fs, dst_path)``. workers (int): Use `worker` threads to copy data, or ``0`` (default) for a single-threaded copy. """ return copy_dir( src_fs, "/", dst_fs, "/", walker=walker, on_copy=on_copy, workers=workers )
def transpose(self):
    '''
    transpose all calibration arrays in case
    different array shape orders were used (x,y) vs. (y,x)
    '''
    shape = self.coeffs['shape']

    def _flip(node):
        # Recursively walk nested lists, transposing every array whose
        # shape matches the recorded calibration shape. Tuples are
        # converted to lists in place so they can hold replacements.
        if type(node) == list:
            for pos, element in enumerate(node):
                if type(element) == tuple:
                    element = list(element)
                    node[pos] = element
                if type(element) == list:
                    _flip(element)
                if isinstance(element, np.ndarray) and element.shape == shape:
                    node[pos] = element.T

    for value in self.coeffs.values():
        if type(value) == dict:
            for nested in value.values():
                _flip(nested)
        else:
            _flip(value)

    # Record the new (swapped) shape order.
    self.coeffs['shape'] = shape[::-1]
transpose all calibration arrays in case different array shape orders were used (x,y) vs. (y,x)
Below is the the instruction that describes the task: ### Input: transpose all calibration arrays in case different array shape orders were used (x,y) vs. (y,x) ### Response: def transpose(self): ''' transpose all calibration arrays in case different array shape orders were used (x,y) vs. (y,x) ''' def _t(item): if type(item) == list: for n, it in enumerate(item): if type(it) == tuple: it = list(it) item[n] = it if type(it) == list: _t(it) if isinstance(it, np.ndarray) and it.shape == s: item[n] = it.T s = self.coeffs['shape'] for item in self.coeffs.values(): if type(item) == dict: for item2 in item.values(): _t(item2) else: _t(item) self.coeffs['shape'] = s[::-1]
def load_dataset(self, dataset_key, force_update=False, auto_update=False):
    """Load a dataset from the local filesystem, downloading it from
    data.world first, if necessary.

    This function returns an object of type `LocalDataset`. The object
    allows access to metadata via it's `describe()` method and to all the
    data via three properties `raw_data`, `tables` and `dataframes`, all
    of which are mappings (dict-like structures).

    :param dataset_key: Dataset identifier, in the form of owner/id or of
        a url
    :type dataset_key: str
    :param force_update: Flag, indicating if a new copy of the dataset
        should be downloaded replacing any previously downloaded copy
        (Default value = False)
    :type force_update: bool
    :param auto_update: Flag, indicating that dataset be updated to the
        latest version
    :type auto_update: bool
    :returns: The object representing the dataset
    :rtype: LocalDataset
    :raises RestApiError: If a server error occurs
    """
    owner_id, dataset_id = parse_dataset_key(dataset_key)
    # Cached copy lives under <cache_dir>/<owner>/<dataset>/latest.
    cache_dir = path.join(self._config.cache_dir, owner_id, dataset_id,
                          'latest')
    backup_dir = None
    if path.isdir(cache_dir) and force_update:
        # Keep the old copy aside so it can be restored if download fails.
        backup_dir = path.join(self._config.cache_dir, owner_id,
                               dataset_id, 'backup')
        move_cache_dir_to_backup_dir(backup_dir, cache_dir)

    descriptor_file = path.join(cache_dir, 'datapackage.json')
    if not path.isfile(descriptor_file):
        # No cached copy (or it was moved to backup): download fresh.
        try:
            descriptor_file = self.api_client.download_datapackage(
                dataset_key, cache_dir)
        except RestApiError as e:
            if backup_dir is not None:
                # Download failed but a backup exists: restore and warn.
                shutil.move(backup_dir, cache_dir)
                warn('Unable to download datapackage ({}). '
                     'Loading previously saved version.'.format(e.reason))
            else:
                raise
    else:
        # Cached copy exists: check whether it is stale.
        try:
            dataset_info = self.api_client.get_dataset(dataset_key)
        except RestApiError as e:
            # Offline / API error: silently fall back to the cached copy.
            return LocalDataset(descriptor_file)

        last_modified = datetime.strptime(dataset_info['updated'],
                                          '%Y-%m-%dT%H:%M:%S.%fZ')
        if (last_modified > datetime.utcfromtimestamp(
                path.getmtime(str(descriptor_file)))):
            if auto_update:
                try:
                    # Same backup-then-download dance as force_update.
                    backup_dir = path.join(self._config.cache_dir,
                                           owner_id, dataset_id, 'backup')
                    move_cache_dir_to_backup_dir(backup_dir, cache_dir)
                    descriptor_file = self.api_client. \
                        download_datapackage(dataset_key, cache_dir)
                except RestApiError as e:
                    if backup_dir is not None:
                        shutil.move(backup_dir, cache_dir)
                        warn('Unable to auto update datapackage ({}). '
                             'Loading previously saved version.'
                             .format(e.reason))
                    else:
                        raise
            else:
                # Stale but auto_update is off: warn on every call.
                filterwarnings('always',
                               message='You are using an outdated copy')
                warn('You are using an outdated copy of {}. '
                     'If you wish to use the latest version, call this '
                     'function with the argument '
                     'auto_update=True or '
                     'force_update=True'.format(dataset_key))

    if backup_dir is not None:
        # Success: the backup is no longer needed.
        shutil.rmtree(backup_dir, ignore_errors=True)

    return LocalDataset(descriptor_file)
Load a dataset from the local filesystem, downloading it from data.world first, if necessary. This function returns an object of type `LocalDataset`. The object allows access to metedata via it's `describe()` method and to all the data via three properties `raw_data`, `tables` and `dataframes`, all of which are mappings (dict-like structures). :param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param force_update: Flag, indicating if a new copy of the dataset should be downloaded replacing any previously downloaded copy (Default value = False) :type force_update: bool :param auto_update: Flag, indicating that dataset be updated to the latest version :type auto_update: bool :returns: The object representing the dataset :rtype: LocalDataset :raises RestApiError: If a server error occurs
Below is the the instruction that describes the task: ### Input: Load a dataset from the local filesystem, downloading it from data.world first, if necessary. This function returns an object of type `LocalDataset`. The object allows access to metedata via it's `describe()` method and to all the data via three properties `raw_data`, `tables` and `dataframes`, all of which are mappings (dict-like structures). :param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param force_update: Flag, indicating if a new copy of the dataset should be downloaded replacing any previously downloaded copy (Default value = False) :type force_update: bool :param auto_update: Flag, indicating that dataset be updated to the latest version :type auto_update: bool :returns: The object representing the dataset :rtype: LocalDataset :raises RestApiError: If a server error occurs ### Response: def load_dataset(self, dataset_key, force_update=False, auto_update=False): """Load a dataset from the local filesystem, downloading it from data.world first, if necessary. This function returns an object of type `LocalDataset`. The object allows access to metedata via it's `describe()` method and to all the data via three properties `raw_data`, `tables` and `dataframes`, all of which are mappings (dict-like structures). 
:param dataset_key: Dataset identifier, in the form of owner/id or of a url :type dataset_key: str :param force_update: Flag, indicating if a new copy of the dataset should be downloaded replacing any previously downloaded copy (Default value = False) :type force_update: bool :param auto_update: Flag, indicating that dataset be updated to the latest version :type auto_update: bool :returns: The object representing the dataset :rtype: LocalDataset :raises RestApiError: If a server error occurs """ owner_id, dataset_id = parse_dataset_key(dataset_key) cache_dir = path.join(self._config.cache_dir, owner_id, dataset_id, 'latest') backup_dir = None if path.isdir(cache_dir) and force_update: backup_dir = path.join(self._config.cache_dir, owner_id, dataset_id, 'backup') move_cache_dir_to_backup_dir(backup_dir, cache_dir) descriptor_file = path.join(cache_dir, 'datapackage.json') if not path.isfile(descriptor_file): try: descriptor_file = self.api_client.download_datapackage( dataset_key, cache_dir) except RestApiError as e: if backup_dir is not None: shutil.move(backup_dir, cache_dir) warn('Unable to download datapackage ({}). ' 'Loading previously saved version.'.format(e.reason)) else: raise else: try: dataset_info = self.api_client.get_dataset(dataset_key) except RestApiError as e: return LocalDataset(descriptor_file) last_modified = datetime.strptime(dataset_info['updated'], '%Y-%m-%dT%H:%M:%S.%fZ') if (last_modified > datetime.utcfromtimestamp( path.getmtime(str(descriptor_file)))): if auto_update: try: backup_dir = path.join(self._config.cache_dir, owner_id, dataset_id, 'backup') move_cache_dir_to_backup_dir(backup_dir, cache_dir) descriptor_file = self.api_client. \ download_datapackage(dataset_key, cache_dir) except RestApiError as e: if backup_dir is not None: shutil.move(backup_dir, cache_dir) warn('Unable to auto update datapackage ({}). ' 'Loading previously saved version.' 
.format(e.reason)) else: raise else: filterwarnings('always', message='You are using an outdated copy') warn('You are using an outdated copy of {}. ' 'If you wish to use the latest version, call this ' 'function with the argument ' 'auto_update=True or ' 'force_update=True'.format(dataset_key)) if backup_dir is not None: shutil.rmtree(backup_dir, ignore_errors=True) return LocalDataset(descriptor_file)
def validate_words(word_list):
    '''
    Checks for each edited word in word_list if that word is a valid English word.
    Returns all validated words as a set instance.

    :param word_list: list of candidate words, or None.
    :returns: set of the words found in WORD_DISTRIBUTION (empty set when
        word_list is None or empty).
    :raises InputError: if word_list is neither None nor a list.
    '''
    if word_list is None:
        # BUG FIX: the original returned {} here, which is an empty *dict*,
        # not the set promised by the docstring. Same fix below.
        return set()
    elif isinstance(word_list, list):
        if not word_list:
            return set()
        else:
            return set(word for word in word_list if word in WORD_DISTRIBUTION)
    else:
        raise InputError("list variable not passed as argument to validate_words")
Checks for each edited word in word_list if that word is a valid English word. Returns all validated words as a set instance.
Below is the the instruction that describes the task: ### Input: Checks for each edited word in word_list if that word is a valid english word.abs Returns all validated words as a set instance. ### Response: def validate_words(word_list): ''' Checks for each edited word in word_list if that word is a valid english word.abs Returns all validated words as a set instance. ''' if word_list is None: return {} elif isinstance(word_list, list): if not word_list: return {} else: return set(word for word in word_list if word in WORD_DISTRIBUTION) else: raise InputError("list variable not passed as argument to validate_words")
def _line_tracer(self, frame, exc_info=False):
    """This function is called when debugger has decided that it must stop
    or break at this frame.

    :param frame: the frame object at which execution is paused.
    :param exc_info: ``sys.exc_info()``-style tuple when the break is due to
        an exception, else ``False``.
    """
    # next logging statement commented for performance
    _logger.f_debug("user_line() with "
                    "threadName=%s, frame=%s, frame.f_code=%s, self.mainpyfile=%s,"
                    "self.should_break_here()=%s, self.should_stop_here()=%s\n",
                    threading.currentThread().name,
                    hex(id(frame)),
                    frame.f_code,
                    self.mainpyfile,
                    self.should_break_here(frame),
                    self.should_stop_here(frame))

    # next lines allow to focus debugging on only one thread: the first
    # thread reaching this tracer becomes the debugged thread.
    if self.debugged_thread_ident is None:
        self.debugged_thread_ident = threading.currentThread().ident
        self.debugged_thread_name = threading.currentThread().name
    else:
        if threading.currentThread().ident != self.debugged_thread_ident:
            return

    # Acquire Breakpoint Lock before sending break command to remote client
    self._active_breakpoint_lock.acquire()
    self.status = 'stopped'
    frames = self.dump_frames(frame)
    exception = None
    warning_messages = []
    if exc_info:
        exception = {
            'type': IKPdbRepr(exc_info[1]),
            'info': exc_info[1].args[0]
        }
    if self.stop_at_first_statement:
        warning_messages = ["IKP3db stopped so that you can setup some "
                            "breakpoints before 'Resuming' execution."]
        self.stop_at_first_statement = False

    remote_client.send('programBreak',
                       frames=frames,
                       threads=self.get_threads(),
                       result={'executionStatus': 'stopped'},  #=self.status
                       warning_messages=warning_messages,
                       exception=exception)

    # Enter a loop to process commands sent by client; stepping/resume
    # commands break out of the loop, inspection commands keep it running.
    while True:
        command = self._command_q.get()
        if command['cmd'] == 'resume':
            self.setup_resume()
            break
        elif command['cmd'] == 'stepOver':
            self.setup_step_over(frame)
            break
        elif command['cmd'] == 'stepInto':
            self.setup_step_into(frame)
            break
        elif command['cmd'] == 'stepOut':
            self.setup_step_out(frame)
            break
        elif command['cmd'] == 'evaluate':
            value, result_type = self.evaluate(command['frame'],
                                               command['expression'],
                                               command['global'],
                                               disable_break=command['disableBreak'])
            remote_client.reply(command['obj'],
                                {'value': value, 'type': result_type})
        elif command['cmd'] == 'getProperties':
            error_messages = []
            if command.get('id', False):
                # Resolve the object back from the id the client sent us
                # (id() of a live object in this process).
                po_value = ctypes.cast(command['id'], ctypes.py_object).value
                result = {'properties': self.extract_object_properties(po_value) or []}
                command_exec_status = 'ok'
            else:
                result = {'properties': self.extract_object_properties(None) or []}
                command_exec_status = 'ok'
            _logger.e_debug("    => %s", result)
            remote_client.reply(command['obj'], result,
                                command_exec_status=command_exec_status,
                                error_messages=error_messages)
        elif command['cmd'] == 'setVariable':
            error_messages = []
            result = {}
            command_exec_status = 'ok'
            # TODO: Rework to use id now that we are in right thread context
            err_message = self.let_variable(command['frame'],
                                            command['name'],
                                            command['value'])
            if err_message:
                command_exec_status = 'error'
                msg = "setVariable(%s=%s) failed with error: %s" % (command['name'],
                                                                    command['value'],
                                                                    err_message)
                error_messages = [msg]
                _logger.e_error(msg)
            remote_client.reply(command['obj'], result,
                                command_exec_status=command_exec_status,
                                error_messages=error_messages)
        elif command['cmd'] == '_InternalQuit':
            _logger.x_critical("Exiting tracer upon reception of _Internal"
                               "Quit command")
            raise IKPdbQuit()
        else:
            # BUG FIX: the original logged the undefined name `resume_command`
            # here, so an unknown command raised NameError instead of being
            # reported; log the actual received command instead.
            _logger.x_critical("Unknown command: %s received by _line_tracer()"
                               % command['cmd'])
            raise IKPdbQuit()

    self.status = 'running'
    self._active_breakpoint_lock.release()
    return
This function is called when debugger has decided that it must stop or break at this frame.
Below is the the instruction that describes the task: ### Input: This function is called when debugger has decided that it must stop or break at this frame. ### Response: def _line_tracer(self, frame, exc_info=False): """This function is called when debugger has decided that it must stop or break at this frame.""" # next logging statement commented for performance _logger.f_debug("user_line() with " "threadName=%s, frame=%s, frame.f_code=%s, self.mainpyfile=%s," "self.should_break_here()=%s, self.should_stop_here()=%s\n", threading.currentThread().name, hex(id(frame)), frame.f_code, self.mainpyfile, self.should_break_here(frame), self.should_stop_here(frame)) # next lines allow to focus debugging on only one thread if self.debugged_thread_ident is None: self.debugged_thread_ident = threading.currentThread().ident self.debugged_thread_name = threading.currentThread().name else: if threading.currentThread().ident != self.debugged_thread_ident: return # Acquire Breakpoint Lock before sending break command to remote client self._active_breakpoint_lock.acquire() self.status = 'stopped' frames = self.dump_frames(frame) exception=None warning_messages = [] if exc_info: exception = { 'type': IKPdbRepr(exc_info[1]), 'info': exc_info[1].args[0] } if self.stop_at_first_statement: warning_messages = ["IKP3db stopped so that you can setup some " "breakpoints before 'Resuming' execution."] self.stop_at_first_statement = False remote_client.send('programBreak', frames=frames, threads= self.get_threads(), result={'executionStatus': 'stopped'}, #=self.status warning_messages=warning_messages, exception=exception) # Enter a loop to process commands sent by client while True: command = self._command_q.get() if command['cmd'] == 'resume': self.setup_resume() break elif command['cmd'] == 'stepOver': self.setup_step_over(frame) break elif command['cmd'] == 'stepInto': self.setup_step_into(frame) break elif command['cmd'] == 'stepOut': self.setup_step_out(frame) break elif command['cmd'] 
== 'evaluate': value, result_type = self.evaluate(command['frame'], command['expression'], command['global'], disable_break=command['disableBreak']) remote_client.reply(command['obj'], {'value': value, 'type': result_type}) elif command['cmd'] == 'getProperties': error_messages = [] if command.get('id', False): po_value = ctypes.cast(command['id'], ctypes.py_object).value result={'properties': self.extract_object_properties(po_value) or []} command_exec_status = 'ok' else: result={'properties': self.extract_object_properties(None) or []} command_exec_status = 'ok' _logger.e_debug(" => %s", result) remote_client.reply(command['obj'], result, command_exec_status=command_exec_status, error_messages=error_messages) elif command['cmd'] == 'setVariable': error_messages = [] result = {} command_exec_status = 'ok' # TODO: Rework to use id now that we are in right thread context err_message = self.let_variable(command['frame'], command['name'], command['value']) if err_message: command_exec_status = 'error' msg = "setVariable(%s=%s) failed with error: %s" % (command['name'], command['value'], err_message) error_messages = [msg] _logger.e_error(msg) remote_client.reply(command['obj'], result, command_exec_status=command_exec_status, error_messages=error_messages) elif command['cmd'] == '_InternalQuit': _logger.x_critical("Exiting tracer upon reception of _Internal" "Quit command") raise IKPdbQuit() else: _logger.x_critical("Unknown command: %s received by _line_tracer()" % resume_command) raise IKPdbQuit() self.status = 'running' self._active_breakpoint_lock.release() return
def write_parent (self, url_data):
    """Write url_data.parent_url, followed by optional line/column/page hints."""
    self.write(self.part('parenturl') + self.spaces("parenturl"))
    # Collect the pieces first, then emit them in a single writeln call.
    pieces = [url_data.parent_url]
    if url_data.line > 0:
        pieces.append(_(", line %d") % url_data.line)
    if url_data.column > 0:
        pieces.append(_(", col %d") % url_data.column)
    if url_data.page > 0:
        pieces.append(_(", page %d") % url_data.page)
    self.writeln("".join(pieces), color=self.colorparent)
Write url_data.parent_url.
Below is the instruction that describes the task: ### Input: Write url_data.parent_url. ### Response: def write_parent (self, url_data): """Write url_data.parent_url.""" self.write(self.part('parenturl') + self.spaces("parenturl")) txt = url_data.parent_url if url_data.line > 0: txt += _(", line %d") % url_data.line if url_data.column > 0: txt += _(", col %d") % url_data.column if url_data.page > 0: txt += _(", page %d") % url_data.page self.writeln(txt, color=self.colorparent)
def highlighted_abstract(self):
    """Highlight the found terms in the abstract text."""
    terms = self.fields.get('abstract', [])
    # Use the highlighting query only when there are terms to mark;
    # otherwise fall back to fetching the plain abstract.
    sql_name = 'highlighted-abstract' if terms else 'get-abstract'
    sql = _read_sql_file(sql_name)
    params = {'id': self['id'],
              'query': ' & '.join(terms)}
    with db_connect() as connection:
        with connection.cursor() as cursor:
            cursor.execute(sql, params)
            row = cursor.fetchone()
    if row:
        return row[0]
Highlight the found terms in the abstract text.
Below is the the instruction that describes the task: ### Input: Highlight the found terms in the abstract text. ### Response: def highlighted_abstract(self): """Highlight the found terms in the abstract text.""" abstract_terms = self.fields.get('abstract', []) if abstract_terms: sql = _read_sql_file('highlighted-abstract') else: sql = _read_sql_file('get-abstract') arguments = {'id': self['id'], 'query': ' & '.join(abstract_terms), } with db_connect() as db_connection: with db_connection.cursor() as cursor: cursor.execute(sql, arguments) hl_abstract = cursor.fetchone() if hl_abstract: return hl_abstract[0]
def _get_parent_classes_transparent(cls, slot, page, instance=None):
    """
    Return all parent classes including those marked as "transparent".
    """
    parents = super(CascadePluginBase, cls).get_parent_classes(slot, page, instance)
    if parents is None:
        # No restriction reported by the base class; when this plugin does
        # not require a parent at all, keep the unrestricted behaviour.
        if cls.get_require_parent(slot, page) is False:
            return None
        parents = []
    # Every plugin marked as 'transparent' is a potential parent as well.
    combined = set(parents) | set(TransparentContainer.get_plugins())
    return list(combined)
Return all parent classes including those marked as "transparent".
Below is the the instruction that describes the task: ### Input: Return all parent classes including those marked as "transparent". ### Response: def _get_parent_classes_transparent(cls, slot, page, instance=None): """ Return all parent classes including those marked as "transparent". """ parent_classes = super(CascadePluginBase, cls).get_parent_classes(slot, page, instance) if parent_classes is None: if cls.get_require_parent(slot, page) is False: return parent_classes = [] # add all plugins marked as 'transparent', since they all are potential parents parent_classes = set(parent_classes) parent_classes.update(TransparentContainer.get_plugins()) return list(parent_classes)
def deploy_master_contract(self, deployer_account=None, deployer_private_key=None) -> str:
    """
    Deploy master contract. Takes deployer_account (if unlocked in the node) or the
    deployer private key
    :param deployer_account: Unlocked ethereum account
    :param deployer_private_key: Private key of an ethereum account
    :return: deployed contract address
    """
    # One of the two credential forms must be supplied.
    assert deployer_account or deployer_private_key
    deployer_address = deployer_account or self.ethereum_client.private_key_to_address(deployer_private_key)
    safe_contract = self.get_contract()
    # First transaction: deploy the master-copy bytecode.
    tx = safe_contract.constructor().buildTransaction({'from': deployer_address})
    tx_hash = self.ethereum_client.send_unsigned_transaction(tx, private_key=deployer_private_key,
                                                             public_key=deployer_account)
    # Wait (up to 60s) for the receipt; a zero status would mean the deploy reverted.
    tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=60)
    assert tx_receipt.status
    contract_address = tx_receipt.contractAddress
    # Init master copy
    # Second transaction: call setup() so the master copy itself can never be
    # taken over — its owners are addresses nobody controls.
    master_safe = self.get_contract(contract_address)
    tx = master_safe.functions.setup(
        # We use 2 owners that nobody controls for the master copy
        ["0x0000000000000000000000000000000000000002", "0x0000000000000000000000000000000000000003"],
        2,  # Threshold. Maximum security
        NULL_ADDRESS,  # Address for optional DELEGATE CALL
        b'',  # Data for optional DELEGATE CALL
        NULL_ADDRESS,  # Payment token
        0,  # Payment
        NULL_ADDRESS  # Refund receiver
    ).buildTransaction({'from': deployer_address})
    tx_hash = self.ethereum_client.send_unsigned_transaction(tx, private_key=deployer_private_key,
                                                             public_key=deployer_account)
    tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=60)
    assert tx_receipt.status
    logger.info("Deployed and initialized Safe Master Contract=%s by %s", contract_address,
                deployer_address)
    return contract_address
Deploy master contract. Takes deployer_account (if unlocked in the node) or the deployer private key :param deployer_account: Unlocked ethereum account :param deployer_private_key: Private key of an ethereum account :return: deployed contract address
Below is the the instruction that describes the task: ### Input: Deploy master contract. Takes deployer_account (if unlocked in the node) or the deployer private key :param deployer_account: Unlocked ethereum account :param deployer_private_key: Private key of an ethereum account :return: deployed contract address ### Response: def deploy_master_contract(self, deployer_account=None, deployer_private_key=None) -> str: """ Deploy master contract. Takes deployer_account (if unlocked in the node) or the deployer private key :param deployer_account: Unlocked ethereum account :param deployer_private_key: Private key of an ethereum account :return: deployed contract address """ assert deployer_account or deployer_private_key deployer_address = deployer_account or self.ethereum_client.private_key_to_address(deployer_private_key) safe_contract = self.get_contract() tx = safe_contract.constructor().buildTransaction({'from': deployer_address}) tx_hash = self.ethereum_client.send_unsigned_transaction(tx, private_key=deployer_private_key, public_key=deployer_account) tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=60) assert tx_receipt.status contract_address = tx_receipt.contractAddress # Init master copy master_safe = self.get_contract(contract_address) tx = master_safe.functions.setup( # We use 2 owners that nobody controls for the master copy ["0x0000000000000000000000000000000000000002", "0x0000000000000000000000000000000000000003"], 2, # Threshold. 
Maximum security NULL_ADDRESS, # Address for optional DELEGATE CALL b'', # Data for optional DELEGATE CALL NULL_ADDRESS, # Payment token 0, # Payment NULL_ADDRESS # Refund receiver ).buildTransaction({'from': deployer_address}) tx_hash = self.ethereum_client.send_unsigned_transaction(tx, private_key=deployer_private_key, public_key=deployer_account) tx_receipt = self.ethereum_client.get_transaction_receipt(tx_hash, timeout=60) assert tx_receipt.status logger.info("Deployed and initialized Safe Master Contract=%s by %s", contract_address, deployer_address) return contract_address
def sI(qubit: Qubit, coefficient: complex = 1.0) -> Pauli:
    """Return the Pauli sigma_I (identity) operator.

    The qubit does not affect the result; it is kept as an argument for
    consistency with the other sigma constructors.
    """
    operator = Pauli.sigma(qubit, 'I', coefficient)
    return operator
Return the Pauli sigma_I (identity) operator. The qubit is irrelevant, but kept as an argument for consistency
Below is the instruction that describes the task: ### Input: Return the Pauli sigma_I (identity) operator. The qubit is irrelevant, but kept as an argument for consistency ### Response: def sI(qubit: Qubit, coefficient: complex = 1.0) -> Pauli: """Return the Pauli sigma_I (identity) operator. The qubit is irrelevant, but kept as an argument for consistency""" return Pauli.sigma(qubit, 'I', coefficient)
def find_unique_identity(session, uuid):
    """Find a unique identity.

    Look up a unique identity by its UUID using the given `session`.
    When no unique identity with that UUID exists, `None` is returned.

    :param session: database session
    :param uuid: id of the unique identity to find

    :returns: a unique identity object; `None` when the unique identity
        does not exist
    """
    query = session.query(UniqueIdentity)
    return query.filter(UniqueIdentity.uuid == uuid).first()
Find a unique identity. Find a unique identity by its UUID using the given `session`. When the unique identity does not exist the function will return `None`. :param session: database session :param uuid: id of the unique identity to find :returns: a unique identity object; `None` when the unique identity does not exist
Below is the the instruction that describes the task: ### Input: Find a unique identity. Find a unique identity by its UUID using the given `session`. When the unique identity does not exist the function will return `None`. :param session: database session :param uuid: id of the unique identity to find :returns: a unique identity object; `None` when the unique identity does not exist ### Response: def find_unique_identity(session, uuid): """Find a unique identity. Find a unique identity by its UUID using the given `session`. When the unique identity does not exist the function will return `None`. :param session: database session :param uuid: id of the unique identity to find :returns: a unique identity object; `None` when the unique identity does not exist """ uidentity = session.query(UniqueIdentity). \ filter(UniqueIdentity.uuid == uuid).first() return uidentity
def verify_path(path, is_collection):
    """Verifies that a ``path`` has the correct form.

    Checks that all of the elements in ``path`` are strings.

    Args:
        path (Tuple[str, ...]): The components in a collection or document path.
        is_collection (bool): Indicates if the ``path`` represents a document
            or a collection.

    Raises:
        ValueError: if

            * the ``path`` is empty
            * ``is_collection=True`` and there are an even number of elements
            * ``is_collection=False`` and there are an odd number of elements
            * an element is not a string
    """
    if not path:
        raise ValueError("Document or collection path cannot be empty")

    # Collections have odd-length paths; documents have even-length paths.
    is_odd = len(path) % 2 == 1
    if is_collection and not is_odd:
        raise ValueError("A collection must have an odd number of path elements")
    if not is_collection and is_odd:
        raise ValueError("A document must have an even number of path elements")

    for element in path:
        if not isinstance(element, six.string_types):
            raise ValueError(BAD_PATH_TEMPLATE.format(element, type(element)))
Verifies that a ``path`` has the correct form. Checks that all of the elements in ``path`` are strings. Args: path (Tuple[str, ...]): The components in a collection or document path. is_collection (bool): Indicates if the ``path`` represents a document or a collection. Raises: ValueError: if * the ``path`` is empty * ``is_collection=True`` and there are an even number of elements * ``is_collection=False`` and there are an odd number of elements * an element is not a string
Below is the the instruction that describes the task: ### Input: Verifies that a ``path`` has the correct form. Checks that all of the elements in ``path`` are strings. Args: path (Tuple[str, ...]): The components in a collection or document path. is_collection (bool): Indicates if the ``path`` represents a document or a collection. Raises: ValueError: if * the ``path`` is empty * ``is_collection=True`` and there are an even number of elements * ``is_collection=False`` and there are an odd number of elements * an element is not a string ### Response: def verify_path(path, is_collection): """Verifies that a ``path`` has the correct form. Checks that all of the elements in ``path`` are strings. Args: path (Tuple[str, ...]): The components in a collection or document path. is_collection (bool): Indicates if the ``path`` represents a document or a collection. Raises: ValueError: if * the ``path`` is empty * ``is_collection=True`` and there are an even number of elements * ``is_collection=False`` and there are an odd number of elements * an element is not a string """ num_elements = len(path) if num_elements == 0: raise ValueError("Document or collection path cannot be empty") if is_collection: if num_elements % 2 == 0: raise ValueError("A collection must have an odd number of path elements") else: if num_elements % 2 == 1: raise ValueError("A document must have an even number of path elements") for element in path: if not isinstance(element, six.string_types): msg = BAD_PATH_TEMPLATE.format(element, type(element)) raise ValueError(msg)
def input_from_blif(blif, block=None, merge_io_vectors=True):
    """ Read an open blif file or string as input, updating the block appropriately

        Assumes the blif has been flattened and their is only a single module.
        Assumes that there is only one single shared clock and reset
        Assumes that output is generated by Yosys with formals in a particular order
        Ignores reset signal (which it assumes is input only to the flip flops)

        :param blif: an open file object or a string containing the BLIF text.
        :param block: the Block to populate (defaults to the working block).
        :param merge_io_vectors: when True, inputs/outputs named ``foo[0]``,
            ``foo[1]``, ... are merged into one multi-bit wirevector ``foo``.
    """
    import pyparsing
    import six
    from pyparsing import (Word, Literal, OneOrMore, ZeroOrMore,
                           Suppress, Group, Keyword)

    block = working_block(block)

    # Accept either an open file (has .read) or the raw BLIF text itself.
    try:
        blif_string = blif.read()
    except AttributeError:
        if isinstance(blif, six.string_types):
            blif_string = blif
        else:
            raise PyrtlError('input_blif expecting either open file or string')

    def SKeyword(x):
        # a keyword that is matched but suppressed from the parse results
        return Suppress(Keyword(x))

    def SLiteral(x):
        # a literal that is matched but suppressed from the parse results
        return Suppress(Literal(x))

    def twire(x):
        """ find or make wire named x and return it """
        s = block.get_wirevector_by_name(x)
        if s is None:
            s = WireVector(bitwidth=1, name=x)
        return s

    # Begin BLIF language definition
    signal_start = pyparsing.alphas + '$:[]_<>\\\/'
    signal_middle = pyparsing.alphas + pyparsing.nums + '$:[]_<>\\\/.'
    signal_id = Word(signal_start, signal_middle)
    header = SKeyword('.model') + signal_id('model_name')
    input_list = Group(SKeyword('.inputs') + OneOrMore(signal_id))('input_list')
    output_list = Group(SKeyword('.outputs') + OneOrMore(signal_id))('output_list')

    # a ".names" command: a list of signals followed by a single-output cover
    cover_atom = Word('01-')
    cover_list = Group(ZeroOrMore(cover_atom))('cover_list')
    namesignal_list = Group(OneOrMore(signal_id))('namesignal_list')
    name_def = Group(SKeyword('.names') + namesignal_list + cover_list)('name_def')

    # asynchronous Flip-flop
    dffas_formal = (SLiteral('C=') + signal_id('C') +
                    SLiteral('R=') + signal_id('R') +
                    SLiteral('D=') + signal_id('D') +
                    SLiteral('Q=') + signal_id('Q'))
    dffas_keyword = SKeyword('$_DFF_PN0_') | SKeyword('$_DFF_PP0_')
    dffas_def = Group(SKeyword('.subckt') + dffas_keyword + dffas_formal)('dffas_def')
    # synchronous Flip-flop
    dffs_def = Group(SKeyword('.latch') +
                     signal_id('D') +
                     signal_id('Q') +
                     SLiteral('re') +
                     signal_id('C'))('dffs_def')
    command_def = name_def | dffas_def | dffs_def
    command_list = Group(OneOrMore(command_def))('command_list')

    footer = SKeyword('.end')
    model_def = Group(header + input_list + output_list + command_list + footer)
    model_list = OneOrMore(model_def)
    parser = model_list.ignore(pyparsing.pythonStyleComment)

    # Begin actually reading and parsing the BLIF file
    result = parser.parseString(blif_string, parseAll=True)
    # Blif file with multiple models (currently only handles one flattened models)
    assert(len(result) == 1)
    clk_set = set([])
    ff_clk_set = set([])

    def extract_inputs(model):
        # Strip the per-bit "[i]" suffix so multi-bit inputs can be counted
        # and (optionally) merged into a single Input wirevector.
        start_names = [re.sub(r'\[([0-9]+)\]$', '', x) for x in model['input_list']]
        name_counts = collections.Counter(start_names)
        for input_name in name_counts:
            bitwidth = name_counts[input_name]
            if input_name == 'clk':
                # the clock input is tracked separately, not added as a wire
                clk_set.add(input_name)
            elif not merge_io_vectors or bitwidth == 1:
                block.add_wirevector(Input(bitwidth=1, name=input_name))
            else:
                wire_in = Input(bitwidth=bitwidth, name=input_name, block=block)
                for i in range(bitwidth):
                    bit_name = input_name + '[' + str(i) + ']'
                    bit_wire = WireVector(bitwidth=1, name=bit_name, block=block)
                    bit_wire <<= wire_in[i]

    def extract_outputs(model):
        start_names = [re.sub(r'\[([0-9]+)\]$', '', x) for x in model['output_list']]
        name_counts = collections.Counter(start_names)
        for output_name in name_counts:
            bitwidth = name_counts[output_name]
            if not merge_io_vectors or bitwidth == 1:
                block.add_wirevector(Output(bitwidth=1, name=output_name))
            else:
                # merge the individual output bits into one Output wirevector
                wire_out = Output(bitwidth=bitwidth, name=output_name, block=block)
                bit_list = []
                for i in range(bitwidth):
                    bit_name = output_name + '[' + str(i) + ']'
                    bit_wire = WireVector(bitwidth=1, name=bit_name, block=block)
                    bit_list.append(bit_wire)
                wire_out <<= concat(*bit_list)

    def extract_commands(model):
        # for each "command" (dff or net) in the model
        for command in model['command_list']:
            # if it is a net (specified as a cover)
            if command.getName() == 'name_def':
                extract_cover(command)
            # else if the command is a d flop flop
            elif command.getName() == 'dffas_def' or command.getName() == 'dffs_def':
                extract_flop(command)
            else:
                raise PyrtlError('unknown command type')

    def extract_cover(command):
        # Map a ".names" single-output cover (truth table) onto a PyRTL gate.
        # Only the specific covers Yosys emits are recognized here.
        netio = command['namesignal_list']
        if len(command['cover_list']) == 0:
            output_wire = twire(netio[0])
            output_wire <<= Const(0, bitwidth=1, block=block)  # const "FALSE"
        elif command['cover_list'].asList() == ['1']:
            output_wire = twire(netio[0])
            output_wire <<= Const(1, bitwidth=1, block=block)  # const "TRUE"
        elif command['cover_list'].asList() == ['1', '1']:
            # Populate clock list if one input is already a clock
            if(netio[1] in clk_set):
                clk_set.add(netio[0])
            elif(netio[0] in clk_set):
                clk_set.add(netio[1])
            else:
                output_wire = twire(netio[1])
                output_wire <<= twire(netio[0])  # simple wire
        elif command['cover_list'].asList() == ['0', '1']:
            output_wire = twire(netio[1])
            output_wire <<= ~ twire(netio[0])  # not gate
        elif command['cover_list'].asList() == ['11', '1']:
            output_wire = twire(netio[2])
            output_wire <<= twire(netio[0]) & twire(netio[1])  # and gate
        elif command['cover_list'].asList() == ['00', '1']:
            output_wire = twire(netio[2])
            output_wire <<= ~ (twire(netio[0]) | twire(netio[1]))  # nor gate
        elif command['cover_list'].asList() == ['1-', '1', '-1', '1']:
            output_wire = twire(netio[2])
            output_wire <<= twire(netio[0]) | twire(netio[1])  # or gate
        elif command['cover_list'].asList() == ['10', '1', '01', '1']:
            output_wire = twire(netio[2])
            output_wire <<= twire(netio[0]) ^ twire(netio[1])  # xor gate
        elif command['cover_list'].asList() == ['1-0', '1', '-11', '1']:
            output_wire = twire(netio[3])
            output_wire <<= (twire(netio[0]) & ~ twire(netio[2])) \
                | (twire(netio[1]) & twire(netio[2]))  # mux
        elif command['cover_list'].asList() == ['-00', '1', '0-0', '1']:
            # NOTE(review): this cover reduces to ~(a & b) expressed as a
            # sum of products — confirm against the Yosys output it targets.
            output_wire = twire(netio[3])
            output_wire <<= (~twire(netio[1]) & ~twire(netio[2])) \
                | (~twire(netio[0]) & ~twire(netio[2]))
        else:
            raise PyrtlError('Blif file with unknown logic cover set "%s"'
                             '(currently gates are hard coded)' % command['cover_list'])

    def extract_flop(command):
        # record the clock driving this flop (single shared clock assumed)
        if(command['C'] not in ff_clk_set):
            ff_clk_set.add(command['C'])

        # Create register and assign next state to D and output to Q
        regname = command['Q'] + '_reg'
        flop = Register(bitwidth=1, name=regname)
        flop.next <<= twire(command['D'])
        flop_output = twire(command['Q'])
        flop_output <<= flop

    for model in result:
        extract_inputs(model)
        extract_outputs(model)
        extract_commands(model)
Read an open blif file or string as input, updating the block appropriately Assumes the blif has been flattened and their is only a single module. Assumes that there is only one single shared clock and reset Assumes that output is generated by Yosys with formals in a particular order Ignores reset signal (which it assumes is input only to the flip flops)
Below is the the instruction that describes the task: ### Input: Read an open blif file or string as input, updating the block appropriately Assumes the blif has been flattened and their is only a single module. Assumes that there is only one single shared clock and reset Assumes that output is generated by Yosys with formals in a particular order Ignores reset signal (which it assumes is input only to the flip flops) ### Response: def input_from_blif(blif, block=None, merge_io_vectors=True): """ Read an open blif file or string as input, updating the block appropriately Assumes the blif has been flattened and their is only a single module. Assumes that there is only one single shared clock and reset Assumes that output is generated by Yosys with formals in a particular order Ignores reset signal (which it assumes is input only to the flip flops) """ import pyparsing import six from pyparsing import (Word, Literal, OneOrMore, ZeroOrMore, Suppress, Group, Keyword) block = working_block(block) try: blif_string = blif.read() except AttributeError: if isinstance(blif, six.string_types): blif_string = blif else: raise PyrtlError('input_blif expecting either open file or string') def SKeyword(x): return Suppress(Keyword(x)) def SLiteral(x): return Suppress(Literal(x)) def twire(x): """ find or make wire named x and return it """ s = block.get_wirevector_by_name(x) if s is None: s = WireVector(bitwidth=1, name=x) return s # Begin BLIF language definition signal_start = pyparsing.alphas + '$:[]_<>\\\/' signal_middle = pyparsing.alphas + pyparsing.nums + '$:[]_<>\\\/.' 
signal_id = Word(signal_start, signal_middle) header = SKeyword('.model') + signal_id('model_name') input_list = Group(SKeyword('.inputs') + OneOrMore(signal_id))('input_list') output_list = Group(SKeyword('.outputs') + OneOrMore(signal_id))('output_list') cover_atom = Word('01-') cover_list = Group(ZeroOrMore(cover_atom))('cover_list') namesignal_list = Group(OneOrMore(signal_id))('namesignal_list') name_def = Group(SKeyword('.names') + namesignal_list + cover_list)('name_def') # asynchronous Flip-flop dffas_formal = (SLiteral('C=') + signal_id('C') + SLiteral('R=') + signal_id('R') + SLiteral('D=') + signal_id('D') + SLiteral('Q=') + signal_id('Q')) dffas_keyword = SKeyword('$_DFF_PN0_') | SKeyword('$_DFF_PP0_') dffas_def = Group(SKeyword('.subckt') + dffas_keyword + dffas_formal)('dffas_def') # synchronous Flip-flop dffs_def = Group(SKeyword('.latch') + signal_id('D') + signal_id('Q') + SLiteral('re') + signal_id('C'))('dffs_def') command_def = name_def | dffas_def | dffs_def command_list = Group(OneOrMore(command_def))('command_list') footer = SKeyword('.end') model_def = Group(header + input_list + output_list + command_list + footer) model_list = OneOrMore(model_def) parser = model_list.ignore(pyparsing.pythonStyleComment) # Begin actually reading and parsing the BLIF file result = parser.parseString(blif_string, parseAll=True) # Blif file with multiple models (currently only handles one flattened models) assert(len(result) == 1) clk_set = set([]) ff_clk_set = set([]) def extract_inputs(model): start_names = [re.sub(r'\[([0-9]+)\]$', '', x) for x in model['input_list']] name_counts = collections.Counter(start_names) for input_name in name_counts: bitwidth = name_counts[input_name] if input_name == 'clk': clk_set.add(input_name) elif not merge_io_vectors or bitwidth == 1: block.add_wirevector(Input(bitwidth=1, name=input_name)) else: wire_in = Input(bitwidth=bitwidth, name=input_name, block=block) for i in range(bitwidth): bit_name = input_name + '[' + str(i) 
+ ']' bit_wire = WireVector(bitwidth=1, name=bit_name, block=block) bit_wire <<= wire_in[i] def extract_outputs(model): start_names = [re.sub(r'\[([0-9]+)\]$', '', x) for x in model['output_list']] name_counts = collections.Counter(start_names) for output_name in name_counts: bitwidth = name_counts[output_name] if not merge_io_vectors or bitwidth == 1: block.add_wirevector(Output(bitwidth=1, name=output_name)) else: wire_out = Output(bitwidth=bitwidth, name=output_name, block=block) bit_list = [] for i in range(bitwidth): bit_name = output_name + '[' + str(i) + ']' bit_wire = WireVector(bitwidth=1, name=bit_name, block=block) bit_list.append(bit_wire) wire_out <<= concat(*bit_list) def extract_commands(model): # for each "command" (dff or net) in the model for command in model['command_list']: # if it is a net (specified as a cover) if command.getName() == 'name_def': extract_cover(command) # else if the command is a d flop flop elif command.getName() == 'dffas_def' or command.getName() == 'dffs_def': extract_flop(command) else: raise PyrtlError('unknown command type') def extract_cover(command): netio = command['namesignal_list'] if len(command['cover_list']) == 0: output_wire = twire(netio[0]) output_wire <<= Const(0, bitwidth=1, block=block) # const "FALSE" elif command['cover_list'].asList() == ['1']: output_wire = twire(netio[0]) output_wire <<= Const(1, bitwidth=1, block=block) # const "TRUE" elif command['cover_list'].asList() == ['1', '1']: # Populate clock list if one input is already a clock if(netio[1] in clk_set): clk_set.add(netio[0]) elif(netio[0] in clk_set): clk_set.add(netio[1]) else: output_wire = twire(netio[1]) output_wire <<= twire(netio[0]) # simple wire elif command['cover_list'].asList() == ['0', '1']: output_wire = twire(netio[1]) output_wire <<= ~ twire(netio[0]) # not gate elif command['cover_list'].asList() == ['11', '1']: output_wire = twire(netio[2]) output_wire <<= twire(netio[0]) & twire(netio[1]) # and gate elif 
command['cover_list'].asList() == ['00', '1']: output_wire = twire(netio[2]) output_wire <<= ~ (twire(netio[0]) | twire(netio[1])) # nor gate elif command['cover_list'].asList() == ['1-', '1', '-1', '1']: output_wire = twire(netio[2]) output_wire <<= twire(netio[0]) | twire(netio[1]) # or gate elif command['cover_list'].asList() == ['10', '1', '01', '1']: output_wire = twire(netio[2]) output_wire <<= twire(netio[0]) ^ twire(netio[1]) # xor gate elif command['cover_list'].asList() == ['1-0', '1', '-11', '1']: output_wire = twire(netio[3]) output_wire <<= (twire(netio[0]) & ~ twire(netio[2])) \ | (twire(netio[1]) & twire(netio[2])) # mux elif command['cover_list'].asList() == ['-00', '1', '0-0', '1']: output_wire = twire(netio[3]) output_wire <<= (~twire(netio[1]) & ~twire(netio[2])) \ | (~twire(netio[0]) & ~twire(netio[2])) else: raise PyrtlError('Blif file with unknown logic cover set "%s"' '(currently gates are hard coded)' % command['cover_list']) def extract_flop(command): if(command['C'] not in ff_clk_set): ff_clk_set.add(command['C']) # Create register and assign next state to D and output to Q regname = command['Q'] + '_reg' flop = Register(bitwidth=1, name=regname) flop.next <<= twire(command['D']) flop_output = twire(command['Q']) flop_output <<= flop for model in result: extract_inputs(model) extract_outputs(model) extract_commands(model)
def pipecmd(cmd1, cmd2):
    """Run *cmd1* and feed its stdout into *cmd2*, like the shell ``cmd1 | cmd2``.

    :param cmd1: argument list for the producer process
    :param cmd2: argument list for the consumer process
    :return: raw bytes that *cmd2* wrote to its stdout
    """
    producer = subprocess.Popen(cmd1, stdout=subprocess.PIPE)
    consumer = subprocess.Popen(cmd2, stdin=producer.stdout,
                                stdout=subprocess.PIPE)
    # Drop our handle on the pipe so cmd1 receives SIGPIPE if cmd2 exits early.
    producer.stdout.close()
    stdout_bytes, _ = consumer.communicate(), None
    return stdout_bytes[0]
Return output of "cmd1 | cmd2".
Below is the instruction that describes the task: ### Input: Return output of "cmd1 | cmd2". ### Response: def pipecmd (cmd1, cmd2): """Return output of "cmd1 | cmd2".""" p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE) p2 = subprocess.Popen(cmd2, stdin=p1.stdout, stdout=subprocess.PIPE) p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits. return p2.communicate()[0]
def update(self, quote_id, product_data, store_view=None):
    """Update one or several products in the shopping cart (quote).

    :param quote_id: Shopping cart ID (quote ID)
    :param product_data: list of dicts of product details, see def add()
    :param store_view: Store view ID or code
    :return: boolean, True if the product is updated.
    """
    call_args = [quote_id, product_data, store_view]
    result = self.call('cart_product.update', call_args)
    return bool(result)
Allows you to update one or several products in the shopping cart (quote). :param quote_id: Shopping cart ID (quote ID) :param product_data, list of dicts of product details, see def add() :param store_view: Store view ID or code :return: boolean, True if the product is updated .
Below is the the instruction that describes the task: ### Input: Allows you to update one or several products in the shopping cart (quote). :param quote_id: Shopping cart ID (quote ID) :param product_data, list of dicts of product details, see def add() :param store_view: Store view ID or code :return: boolean, True if the product is updated . ### Response: def update(self, quote_id, product_data, store_view=None): """ Allows you to update one or several products in the shopping cart (quote). :param quote_id: Shopping cart ID (quote ID) :param product_data, list of dicts of product details, see def add() :param store_view: Store view ID or code :return: boolean, True if the product is updated . """ return bool( self.call('cart_product.update', [quote_id, product_data, store_view]) )
def wcomplex(wave):
    r"""
    Convert a waveform's dependent variable vector to complex.

    :param wave: Waveform
    :type wave: :py:class:`peng.eng.Waveform`

    :rtype: :py:class:`peng.eng.Waveform`

    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.wave_functions.wcomplex

    :raises: RuntimeError (Argument \`wave\` is not valid)

    .. [[[end]]]
    """
    # Shallow-copy so the caller's waveform is left untouched; only the
    # dependent vector of the copy is re-bound below.
    ret = copy.copy(wave)
    # ``np.complex`` was deprecated in NumPy 1.20 and removed in 1.24;
    # use the explicit ``np.complex128`` dtype instead (same semantics).
    ret._dep_vector = ret._dep_vector.astype(np.complex128)
    return ret
r""" Convert a waveform's dependent variable vector to complex. :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :rtype: :py:class:`peng.eng.Waveform` .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. peng.wave_functions.wcomplex :raises: RuntimeError (Argument \`wave\` is not valid) .. [[[end]]]
Below is the the instruction that describes the task: ### Input: r""" Convert a waveform's dependent variable vector to complex. :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :rtype: :py:class:`peng.eng.Waveform` .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. peng.wave_functions.wcomplex :raises: RuntimeError (Argument \`wave\` is not valid) .. [[[end]]] ### Response: def wcomplex(wave): r""" Convert a waveform's dependent variable vector to complex. :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :rtype: :py:class:`peng.eng.Waveform` .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. peng.wave_functions.wcomplex :raises: RuntimeError (Argument \`wave\` is not valid) .. [[[end]]] """ ret = copy.copy(wave) ret._dep_vector = ret._dep_vector.astype(np.complex) return ret
def _autodetect_fs(self, cmd="dir", pattern=r"Directory of (.*)/"): """Autodetect the file system on the remote device. Used by SCP operations.""" if not self.check_enable_mode(): raise ValueError("Must be in enable mode to auto-detect the file-system.") output = self.send_command_expect(cmd) match = re.search(pattern, output) if match: file_system = match.group(1) # Test file_system cmd = "dir {}".format(file_system) output = self.send_command_expect(cmd) if "% Invalid" in output or "%Error:" in output: raise ValueError( "An error occurred in dynamically determining remote file " "system: {} {}".format(cmd, output) ) else: return file_system raise ValueError( "An error occurred in dynamically determining remote file " "system: {} {}".format(cmd, output) )
Autodetect the file system on the remote device. Used by SCP operations.
Below is the the instruction that describes the task: ### Input: Autodetect the file system on the remote device. Used by SCP operations. ### Response: def _autodetect_fs(self, cmd="dir", pattern=r"Directory of (.*)/"): """Autodetect the file system on the remote device. Used by SCP operations.""" if not self.check_enable_mode(): raise ValueError("Must be in enable mode to auto-detect the file-system.") output = self.send_command_expect(cmd) match = re.search(pattern, output) if match: file_system = match.group(1) # Test file_system cmd = "dir {}".format(file_system) output = self.send_command_expect(cmd) if "% Invalid" in output or "%Error:" in output: raise ValueError( "An error occurred in dynamically determining remote file " "system: {} {}".format(cmd, output) ) else: return file_system raise ValueError( "An error occurred in dynamically determining remote file " "system: {} {}".format(cmd, output) )
def transition(self):
    """
    Implements a QAM-like transition.

    This function assumes ``program`` and ``program_counter`` instance
    variables are set appropriately, and that the wavefunction simulator
    and classical memory ``ram`` instance variables are in the desired
    QAM input state.

    :return: whether the QAM should halt after this transition.
    """
    # Fetch the instruction at the current program counter; each branch
    # below handles one instruction class and advances the counter itself.
    instruction = self.program[self.program_counter]

    if isinstance(instruction, Gate):
        # User-defined gates are applied via their stored entry; built-in
        # gates go straight to the simulator.
        if instruction.name in self.defined_gates:
            self.wf_simulator.do_gate_matrix(matrix=self.defined_gates[instruction.name],
                                             qubits=[q.index for q in instruction.qubits])
        else:
            self.wf_simulator.do_gate(gate=instruction)
        # Apply every configured post-gate noise channel to the gate's qubits.
        for noise_type, noise_prob in self.post_gate_noise_probabilities.items():
            self.wf_simulator.do_post_gate_noise(noise_type, noise_prob,
                                                 qubits=[q.index for q in instruction.qubits])
        self.program_counter += 1

    elif isinstance(instruction, Measurement):
        # Measure the qubit and store the outcome into classical memory.
        measured_val = self.wf_simulator.do_measurement(qubit=instruction.qubit.index)
        x = instruction.classical_reg  # type: MemoryReference
        self.ram[x.name][x.offset] = measured_val
        self.program_counter += 1

    elif isinstance(instruction, Declare):
        if instruction.shared_region is not None:
            raise NotImplementedError("SHARING is not (yet) implemented.")
        # Allocate a zeroed classical memory region of the declared type.
        self.ram[instruction.name] = np.zeros(instruction.memory_size,
                                              dtype=QUIL_TO_NUMPY_DTYPE[instruction.memory_type])
        self.program_counter += 1

    elif isinstance(instruction, Pragma):
        # TODO: more stringent checks for what's being pragma'd and warnings
        self.program_counter += 1

    elif isinstance(instruction, Jump):
        # unconditional Jump; go directly to Label
        self.program_counter = self.find_label(instruction.target)

    elif isinstance(instruction, JumpTarget):
        # Label; pass straight over
        self.program_counter += 1

    elif isinstance(instruction, JumpConditional):
        # JumpConditional; check classical reg
        x = instruction.condition  # type: MemoryReference
        cond = self.ram[x.name][x.offset]
        # NOTE(review): ``np.bool``/``np.int``/``np.float`` aliases used in
        # this function were removed in NumPy >= 1.24 -- confirm the pinned
        # NumPy version before upgrading.
        if not isinstance(cond, (bool, np.bool, np.int8)):
            raise ValueError("{} requires a data type of BIT; not {}"
                             .format(instruction.op, type(cond)))
        dest_index = self.find_label(instruction.target)
        if isinstance(instruction, JumpWhen):
            jump_if_cond = True
        elif isinstance(instruction, JumpUnless):
            jump_if_cond = False
        else:
            raise TypeError("Invalid JumpConditional")

        # XOR trick: jump exactly when ``cond`` matches the jump polarity.
        if not (cond ^ jump_if_cond):
            # jumping: set prog counter to JumpTarget
            self.program_counter = dest_index
        else:
            # not jumping: hop over this JumpConditional
            self.program_counter += 1

    elif isinstance(instruction, UnaryClassicalInstruction):
        # UnaryClassicalInstruction; set classical reg
        target = instruction.target  # type:MemoryReference
        old = self.ram[target.name][target.offset]
        if isinstance(instruction, ClassicalNeg):
            if not isinstance(old, (int, float, np.int, np.float)):
                raise ValueError("NEG requires a data type of REAL or INTEGER; not {}"
                                 .format(type(old)))
            self.ram[target.name][target.offset] *= -1
        elif isinstance(instruction, ClassicalNot):
            if not isinstance(old, (bool, np.bool)):
                raise ValueError("NOT requires a data type of BIT; not {}"
                                 .format(type(old)))
            self.ram[target.name][target.offset] = not old
        else:
            raise TypeError("Invalid UnaryClassicalInstruction")

        self.program_counter += 1

    elif isinstance(instruction, (LogicalBinaryOp, ArithmeticBinaryOp, ClassicalMove)):
        # Binary classical ops: left operand is always a memory reference;
        # the right operand may be a reference or an immediate value.
        left_ind = instruction.left  # type: MemoryReference
        left_val = self.ram[left_ind.name][left_ind.offset]
        if isinstance(instruction.right, MemoryReference):
            right_ind = instruction.right  # type: MemoryReference
            right_val = self.ram[right_ind.name][right_ind.offset]
        else:
            right_val = instruction.right

        if isinstance(instruction, ClassicalAnd):
            new_val = left_val & right_val
        elif isinstance(instruction, ClassicalInclusiveOr):
            new_val = left_val | right_val
        elif isinstance(instruction, ClassicalExclusiveOr):
            new_val = left_val ^ right_val
        elif isinstance(instruction, ClassicalAdd):
            new_val = left_val + right_val
        elif isinstance(instruction, ClassicalSub):
            new_val = left_val - right_val
        elif isinstance(instruction, ClassicalMul):
            new_val = left_val * right_val
        elif isinstance(instruction, ClassicalDiv):
            new_val = left_val / right_val
        elif isinstance(instruction, ClassicalMove):
            new_val = right_val
        else:
            raise ValueError("Unknown BinaryOp {}".format(type(instruction)))
        # Result is always written back to the left operand's location.
        self.ram[left_ind.name][left_ind.offset] = new_val
        self.program_counter += 1

    elif isinstance(instruction, ClassicalExchange):
        left_ind = instruction.left  # type: MemoryReference
        right_ind = instruction.right  # type: MemoryReference

        # Swap the two classical memory cells.
        tmp = self.ram[left_ind.name][left_ind.offset]
        self.ram[left_ind.name][left_ind.offset] = self.ram[right_ind.name][right_ind.offset]
        self.ram[right_ind.name][right_ind.offset] = tmp
        self.program_counter += 1

    elif isinstance(instruction, Reset):
        self.wf_simulator.reset()
        self.program_counter += 1

    elif isinstance(instruction, ResetQubit):
        # TODO
        raise NotImplementedError("Need to implement in wf simulator")
        # NOTE: unreachable after the raise above; kept as-is.
        self.program_counter += 1

    elif isinstance(instruction, Wait):
        warnings.warn("WAIT does nothing for a noiseless simulator")
        self.program_counter += 1

    elif isinstance(instruction, Nop):
        # well that was easy
        self.program_counter += 1

    elif isinstance(instruction, DefGate):
        if instruction.parameters is not None and len(instruction.parameters) > 0:
            raise NotImplementedError("PyQVM does not support parameterized DEFGATEs")
        # NOTE(review): this stores the gate's *name* under defined_gates,
        # yet the Gate branch above passes that entry as ``matrix=`` to
        # do_gate_matrix -- looks like it should store the gate matrix.
        # Confirm against the Gate-dispatch path before changing.
        self.defined_gates[instruction.name] = instruction.name
        self.program_counter += 1

    elif isinstance(instruction, RawInstr):
        raise NotImplementedError("PyQVM does not support raw instructions. "
                                  "Parse your program")

    elif isinstance(instruction, Halt):
        return True
    else:
        raise ValueError("Unsupported instruction type: {}".format(instruction))

    # return HALTED (i.e. program_counter is end of program)
    return self.program_counter == len(self.program)
Implements a QAM-like transition. This function assumes ``program`` and ``program_counter`` instance variables are set appropriately, and that the wavefunction simulator and classical memory ``ram`` instance variables are in the desired QAM input state. :return: whether the QAM should halt after this transition.
Below is the the instruction that describes the task: ### Input: Implements a QAM-like transition. This function assumes ``program`` and ``program_counter`` instance variables are set appropriately, and that the wavefunction simulator and classical memory ``ram`` instance variables are in the desired QAM input state. :return: whether the QAM should halt after this transition. ### Response: def transition(self): """ Implements a QAM-like transition. This function assumes ``program`` and ``program_counter`` instance variables are set appropriately, and that the wavefunction simulator and classical memory ``ram`` instance variables are in the desired QAM input state. :return: whether the QAM should halt after this transition. """ instruction = self.program[self.program_counter] if isinstance(instruction, Gate): if instruction.name in self.defined_gates: self.wf_simulator.do_gate_matrix(matrix=self.defined_gates[instruction.name], qubits=[q.index for q in instruction.qubits]) else: self.wf_simulator.do_gate(gate=instruction) for noise_type, noise_prob in self.post_gate_noise_probabilities.items(): self.wf_simulator.do_post_gate_noise(noise_type, noise_prob, qubits=[q.index for q in instruction.qubits]) self.program_counter += 1 elif isinstance(instruction, Measurement): measured_val = self.wf_simulator.do_measurement(qubit=instruction.qubit.index) x = instruction.classical_reg # type: MemoryReference self.ram[x.name][x.offset] = measured_val self.program_counter += 1 elif isinstance(instruction, Declare): if instruction.shared_region is not None: raise NotImplementedError("SHARING is not (yet) implemented.") self.ram[instruction.name] = np.zeros(instruction.memory_size, dtype=QUIL_TO_NUMPY_DTYPE[ instruction.memory_type]) self.program_counter += 1 elif isinstance(instruction, Pragma): # TODO: more stringent checks for what's being pragma'd and warnings self.program_counter += 1 elif isinstance(instruction, Jump): # unconditional Jump; go directly to Label 
self.program_counter = self.find_label(instruction.target) elif isinstance(instruction, JumpTarget): # Label; pass straight over self.program_counter += 1 elif isinstance(instruction, JumpConditional): # JumpConditional; check classical reg x = instruction.condition # type: MemoryReference cond = self.ram[x.name][x.offset] if not isinstance(cond, (bool, np.bool, np.int8)): raise ValueError("{} requires a data type of BIT; not {}" .format(instruction.op, type(cond))) dest_index = self.find_label(instruction.target) if isinstance(instruction, JumpWhen): jump_if_cond = True elif isinstance(instruction, JumpUnless): jump_if_cond = False else: raise TypeError("Invalid JumpConditional") if not (cond ^ jump_if_cond): # jumping: set prog counter to JumpTarget self.program_counter = dest_index else: # not jumping: hop over this JumpConditional self.program_counter += 1 elif isinstance(instruction, UnaryClassicalInstruction): # UnaryClassicalInstruction; set classical reg target = instruction.target # type:MemoryReference old = self.ram[target.name][target.offset] if isinstance(instruction, ClassicalNeg): if not isinstance(old, (int, float, np.int, np.float)): raise ValueError("NEG requires a data type of REAL or INTEGER; not {}" .format(type(old))) self.ram[target.name][target.offset] *= -1 elif isinstance(instruction, ClassicalNot): if not isinstance(old, (bool, np.bool)): raise ValueError("NOT requires a data type of BIT; not {}" .format(type(old))) self.ram[target.name][target.offset] = not old else: raise TypeError("Invalid UnaryClassicalInstruction") self.program_counter += 1 elif isinstance(instruction, (LogicalBinaryOp, ArithmeticBinaryOp, ClassicalMove)): left_ind = instruction.left # type: MemoryReference left_val = self.ram[left_ind.name][left_ind.offset] if isinstance(instruction.right, MemoryReference): right_ind = instruction.right # type: MemoryReference right_val = self.ram[right_ind.name][right_ind.offset] else: right_val = instruction.right if 
isinstance(instruction, ClassicalAnd): new_val = left_val & right_val elif isinstance(instruction, ClassicalInclusiveOr): new_val = left_val | right_val elif isinstance(instruction, ClassicalExclusiveOr): new_val = left_val ^ right_val elif isinstance(instruction, ClassicalAdd): new_val = left_val + right_val elif isinstance(instruction, ClassicalSub): new_val = left_val - right_val elif isinstance(instruction, ClassicalMul): new_val = left_val * right_val elif isinstance(instruction, ClassicalDiv): new_val = left_val / right_val elif isinstance(instruction, ClassicalMove): new_val = right_val else: raise ValueError("Unknown BinaryOp {}".format(type(instruction))) self.ram[left_ind.name][left_ind.offset] = new_val self.program_counter += 1 elif isinstance(instruction, ClassicalExchange): left_ind = instruction.left # type: MemoryReference right_ind = instruction.right # type: MemoryReference tmp = self.ram[left_ind.name][left_ind.offset] self.ram[left_ind.name][left_ind.offset] = self.ram[right_ind.name][right_ind.offset] self.ram[right_ind.name][right_ind.offset] = tmp self.program_counter += 1 elif isinstance(instruction, Reset): self.wf_simulator.reset() self.program_counter += 1 elif isinstance(instruction, ResetQubit): # TODO raise NotImplementedError("Need to implement in wf simulator") self.program_counter += 1 elif isinstance(instruction, Wait): warnings.warn("WAIT does nothing for a noiseless simulator") self.program_counter += 1 elif isinstance(instruction, Nop): # well that was easy self.program_counter += 1 elif isinstance(instruction, DefGate): if instruction.parameters is not None and len(instruction.parameters) > 0: raise NotImplementedError("PyQVM does not support parameterized DEFGATEs") self.defined_gates[instruction.name] = instruction.name self.program_counter += 1 elif isinstance(instruction, RawInstr): raise NotImplementedError("PyQVM does not support raw instructions. 
" "Parse your program") elif isinstance(instruction, Halt): return True else: raise ValueError("Unsupported instruction type: {}".format(instruction)) # return HALTED (i.e. program_counter is end of program) return self.program_counter == len(self.program)
def shapesides(inputtocheck, inputtype='shape'):
    """
    Get the sides of a shape, or the shape for a number of sides.

    inputtocheck: The amount of sides or the shape to be checked,
    depending on the value of inputtype.

    inputtype: The type of input provided. Can be: 'shape', 'sides'.

    Returns the side count for a known shape (or 'n' for an unknown
    shape), or the shape name for a known side count (or 'ngon' for an
    unknown count).  Raises ValueError for any other inputtype.
    """
    # Mapping of shape name -> number of sides.  'ngon' is the generic
    # fallback shape and maps to the symbolic side count 'n'.
    shapestosides = {
        'triangle': 3,
        'square': 4,
        'pentagon': 5,
        'hexagon': 6,
        'heptagon': 7,
        'octagon': 8,
        'nonagon': 9,
        'decagon': 10,
        'hendecagon': 11,
        'dodecagon': 12,
        'triskaidecagon': 13,
        'tetrakaidecagon': 14,
        'pentadecagon': 15,
        'hexakaidecagon': 16,
        'heptadecagon': 17,
        'octakaidecagon': 18,
        'enneadecagon': 19,
        'icosagon': 20,
        'triacontagon': 30,
        'tetracontagon': 40,
        'pentacontagon': 50,
        'hexacontagon': 60,
        'heptacontagon': 70,
        'octacontagon': 80,
        'enneacontagon': 90,
        'hectagon': 100,
        'chiliagon': 1000,
        'myriagon': 10000,
        'megagon': 1000000,
        'googolgon': pow(10, 100),
        'ngon': 'n'
    }

    # Inverse mapping: number of sides -> shape name.  Built inline (the
    # side counts are unique) instead of relying on an external helper.
    sidestoshapes = {sides: shape for shape, sides in shapestosides.items()}

    if inputtype.lower() == 'shape':
        key = inputtocheck.lower()
        if key in shapestosides:
            return shapestosides[key]
        # Unknown shape: generic side count.  (Bug fix: the old code did
        # shapestosides['n'], which raised KeyError since 'n' is a value,
        # not a key.)
        return 'n'

    if inputtype.lower() == 'sides':
        # Side counts are ints (plus the symbolic 'n'), so no .lower()
        # here.  (Bug fix: the old code called inputtocheck.lower() and
        # crashed with AttributeError on integer input.)
        if inputtocheck in sidestoshapes:
            return sidestoshapes[inputtocheck]
        # Unknown side count: generic polygon.  (Bug fix: the old code
        # did sidestoshapes['ngon'], which raised KeyError.)
        return 'ngon'

    raise ValueError("Invalid input type.")
Get the sides of a shape. inputtocheck: The amount of sides or the shape to be checked, depending on the value of inputtype. inputtype: The type of input provided. Can be: 'shape', 'sides'.
Below is the the instruction that describes the task: ### Input: Get the sides of a shape. inputtocheck: The amount of sides or the shape to be checked, depending on the value of inputtype. inputtype: The type of input provided. Can be: 'shape', 'sides'. ### Response: def shapesides(inputtocheck, inputtype='shape'): """ Get the sides of a shape. inputtocheck: The amount of sides or the shape to be checked, depending on the value of inputtype. inputtype: The type of input provided. Can be: 'shape', 'sides'. """ # Define the array of sides to a shape shapestosides = { 'triangle': 3, 'square': 4, 'pentagon': 5, 'hexagon': 6, 'heptagon': 7, 'octagon': 8, 'nonagon': 9, 'decagon': 10, 'hendecagon': 11, 'dodecagon': 12, 'triskaidecagon': 13, 'tetrakaidecagon': 14, 'pentadecagon': 15, 'hexakaidecagon': 16, 'heptadecagon': 17, 'octakaidecagon': 18, 'enneadecagon': 19, 'icosagon': 20, 'triacontagon': 30, 'tetracontagon': 40, 'pentacontagon': 50, 'hexacontagon': 60, 'heptacontagon': 70, 'octacontagon': 80, 'enneacontagon': 90, 'hectagon': 100, 'chiliagon': 1000, 'myriagon': 10000, 'megagon': 1000000, 'googolgon': pow(10, 100), 'ngon': 'n' } # Define an array with the flipped version of the sides to a shape sidestoshapes = dictflip(shapestosides) # If the lowercase version of the input type is 'shape' if inputtype.lower() == 'shape': # If the lowercase version of the shape is in the array if inputtocheck.lower() in shapestosides: # Return the corresponding sides return shapestosides[inputtocheck.lower()] # Return 'n' return shapestosides['n'] if inputtype.lower() == 'sides': # If the lowercase version of the shape is in the array if inputtocheck.lower() in sidestoshapes: # Return the corresponding sides return sidestoshapes[inputtocheck.lower()] # Return 'ngon' return sidestoshapes['ngon'] # Raise a warning raise ValueError("Invalid input type.")
def text(self):
    """Formatted Command declaration.

    This is the C declaration for the command.
    """
    # Join each parameter's rendered text with the usual C separator.
    param_pieces = [param.text for param in self.params]
    return '%s (%s)' % (self.proto_text, ', '.join(param_pieces))
Formatted Command declaration. This is the C declaration for the command.
Below is the the instruction that describes the task: ### Input: Formatted Command declaration. This is the C declaration for the command. ### Response: def text(self): """Formatted Command declaration. This is the C declaration for the command. """ params = ', '.join(x.text for x in self.params) return '{0} ({1})'.format(self.proto_text, params)
def set_value(self, value):
    """Determine the proper value based on the data_type"""
    # Route the raw value into the type-specific storage attribute.
    if self.data_type == self.BOOLEAN_TYPE:
        self.value_boolean = bool(value)
        return
    if self.data_type == self.NUMBER_TYPE:
        self.value_number = float(value)
        return
    # Any other data type stores the value verbatim.
    self.value = value
Determine the proper value based on the data_type
Below is the the instruction that describes the task: ### Input: Determine the proper value based on the data_type ### Response: def set_value(self, value): """Determine the proper value based on the data_type""" if self.data_type == self.BOOLEAN_TYPE: self.value_boolean = bool(value) elif self.data_type == self.NUMBER_TYPE: self.value_number = float(value) else: self.value = value
def _expand_join(join_definition):
    """Expand join definition to `join' call.

    :param join_definition: join definition
    :return: expanded join definition
    """
    # Pull the table name and (optional) join type out of the definition;
    # whatever remains in the dict is forwarded as keyword arguments.
    table = join_definition.pop('table')
    join_type = join_definition.pop('join_type', 'join')
    join_func = getattr(mosql_query, join_type)
    return join_func(table, **join_definition)
Expand join definition to `join' call. :param join_definition: join definition :return: expanded join definition
Below is the the instruction that describes the task: ### Input: Expand join definition to `join' call. :param join_definition: join definition :return: expanded join definition ### Response: def _expand_join(join_definition): """Expand join definition to `join' call. :param join_definition: join definition :return: expanded join definition """ join_table_name = join_definition.pop('table') join_func = getattr(mosql_query, join_definition.pop('join_type', 'join')) return join_func(join_table_name, **join_definition)
def get_instance(self, payload):
    """
    Build an instance of TaskQueueRealTimeStatisticsInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance
    """
    # Forward the resolved path context (workspace/task-queue SIDs) along
    # with the raw API payload.
    solution = self._solution
    return TaskQueueRealTimeStatisticsInstance(
        self._version,
        payload,
        workspace_sid=solution['workspace_sid'],
        task_queue_sid=solution['task_queue_sid'],
    )
Build an instance of TaskQueueRealTimeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance
Below is the the instruction that describes the task: ### Input: Build an instance of TaskQueueRealTimeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance ### Response: def get_instance(self, payload): """ Build an instance of TaskQueueRealTimeStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance """ return TaskQueueRealTimeStatisticsInstance( self._version, payload, workspace_sid=self._solution['workspace_sid'], task_queue_sid=self._solution['task_queue_sid'], )
def ConsultarCaracteres(self, sep="||"):
    """Return the list of sender/receiver characters (code, description)."""
    ret = self.client.consultarCaracteresParticipante(
        auth={
            'token': self.Token,
            'sign': self.Sign,
            'cuit': self.Cuit,
        },
    )['respuesta']
    self.__analizar_errores(ret)
    # Bovine and porcine character lists are merged into one result.
    items = ret.get('caracter', []) + ret.get('caracterPorcino', [])
    if sep is None:
        return {it['codigo']: it['descripcion'] for it in items}
    # Render each row as "<sep> codigo <sep> descripcion <sep>".
    row_template = "%s %%s %s %%s %s" % (sep, sep, sep)
    return [row_template % (it['codigo'], it['descripcion']) for it in items]
Retorna listado de caracteres emisor/receptor (código, descripción)
Below is the the instruction that describes the task: ### Input: Retorna listado de caracteres emisor/receptor (código, descripción) ### Response: def ConsultarCaracteres(self, sep="||"): "Retorna listado de caracteres emisor/receptor (código, descripción)" ret = self.client.consultarCaracteresParticipante( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, )['respuesta'] self.__analizar_errores(ret) array = ret.get('caracter', []) + ret.get('caracterPorcino', []) if sep is None: return dict([(it['codigo'], it['descripcion']) for it in array]) else: return [("%s %%s %s %%s %s" % (sep, sep, sep)) % (it['codigo'], it['descripcion']) for it in array]
def p_expression_unor(self, p):
    # PLY grammar rule: the string below IS the grammar production that
    # ply.yacc parses -- do not edit it as if it were documentation.
    'expression : NOR expression %prec UNOR'
    # Build a unary reduction-NOR AST node from the operand expression,
    # tagged with the source line of the NOR token.
    p[0] = Unor(p[2], lineno=p.lineno(1))
    # Propagate that line number to the result symbol (index 0).
    p.set_lineno(0, p.lineno(1))
expression : NOR expression %prec UNOR
Below is the instruction that describes the task: ### Input: expression : NOR expression %prec UNOR ### Response: def p_expression_unor(self, p): 'expression : NOR expression %prec UNOR' p[0] = Unor(p[2], lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
def listunspent(
    self,
    address: str = "",
    minconf: int = 1,
    maxconf: int = 999999,
) -> list:
    """List UTXOs.

    Modified version that allows filtering by address.
    """
    params = [minconf, maxconf]
    if address:
        # The RPC expects the address filter as a list of addresses.
        params.append([address])
    return self.req("listunspent", params)
list UTXOs modified version to allow filtering by address.
Below is the the instruction that describes the task: ### Input: list UTXOs modified version to allow filtering by address. ### Response: def listunspent( self, address: str="", minconf: int=1, maxconf: int=999999, ) -> list: '''list UTXOs modified version to allow filtering by address. ''' if address: return self.req("listunspent", [minconf, maxconf, [address]]) return self.req("listunspent", [minconf, maxconf])
def create_project(self, name, **kwargs):
    """
    Creates a project with a name. All other parameters are optional.
    They are: `note`, `customer_id`, `budget`, `budget_type`,
    `active_hourly_rate`, `hourly_rate`, `hourly_rates_per_service`,
    and `archived`.
    """
    # Wrap the optional attributes under the "project" key for the API payload.
    data = self._wrap_dict("project", kwargs)
    # NOTE(review): this writes the name under data["customer"], although the
    # payload was just wrapped as "project" -- looks like it should be
    # data["project"]["name"].  Confirm against _wrap_dict and the API schema
    # before changing.
    data["customer"]["name"] = name
    return self.post("/projects.json", data=data)
Creates a project with a name. All other parameters are optional. They are: `note`, `customer_id`, `budget`, `budget_type`, `active_hourly_rate`, `hourly_rate`, `hourly_rates_per_service`, and `archived`.
Below is the the instruction that describes the task: ### Input: Creates a project with a name. All other parameters are optional. They are: `note`, `customer_id`, `budget`, `budget_type`, `active_hourly_rate`, `hourly_rate`, `hourly_rates_per_service`, and `archived`. ### Response: def create_project(self, name, **kwargs): """ Creates a project with a name. All other parameters are optional. They are: `note`, `customer_id`, `budget`, `budget_type`, `active_hourly_rate`, `hourly_rate`, `hourly_rates_per_service`, and `archived`. """ data = self._wrap_dict("project", kwargs) data["customer"]["name"] = name return self.post("/projects.json", data=data)
def get_or_add_bgPr(self):
    """Return `p:bg/p:bgPr` grandchild.

    If no such grandchild is present, any existing `p:bg` child is first
    removed and a new default `p:bg` with noFill settings is added.
    """
    background = self.bg
    # Replace the background when it is absent or lacks a bgPr element.
    if background is None or background.bgPr is None:
        self._change_to_noFill_bg()
    # Re-read self.bg: _change_to_noFill_bg may have installed a new element.
    return self.bg.bgPr
Return `p:bg/p:bgPr` grandchild. If no such grandchild is present, any existing `p:bg` child is first removed and a new default `p:bg` with noFill settings is added.
Below is the the instruction that describes the task: ### Input: Return `p:bg/p:bgPr` grandchild. If no such grandchild is present, any existing `p:bg` child is first removed and a new default `p:bg` with noFill settings is added. ### Response: def get_or_add_bgPr(self): """Return `p:bg/p:bgPr` grandchild. If no such grandchild is present, any existing `p:bg` child is first removed and a new default `p:bg` with noFill settings is added. """ bg = self.bg if bg is None or bg.bgPr is None: self._change_to_noFill_bg() return self.bg.bgPr
def run(icon=None):
    """
    Launch the Lattice Viewer wx application and block in its main loop.

    :param icon: optional icon (presumably a ``wx.Icon`` -- confirm) to
        install on the main frame; when ``None`` the default icon is kept.

    .. versionchanged:: 2.0.0

       Remove parameter *debug*.
    """
    app = wx.App()
    frame = myappframe.MyAppFrame(None, u'Lattice Viewer \u2014 Accelerator Online Modeling Tool')
    frame.SetSize((1024, 768))
    frame.Show()
    if icon is not None:
        frame.SetIcon(icon)
    # Blocks until the last top-level window is closed.
    app.MainLoop()
.. versionchanged:: 2.0.0 Remove parameter *debug*.
Below is the the instruction that describes the task: ### Input: .. versionchanged:: 2.0.0 Remove parameter *debug*. ### Response: def run(icon=None): """ .. versionchanged:: 2.0.0 Remove parameter *debug*. """ app = wx.App() frame = myappframe.MyAppFrame(None, u'Lattice Viewer \u2014 Accelerator Online Modeling Tool') frame.SetSize((1024, 768)) frame.Show() if icon is not None: frame.SetIcon(icon) app.MainLoop()
def instruction_ROL_register(self, opcode, register):
    """Rotate accumulator left."""
    old_value = register.value
    rotated = self.ROL(old_value)
    # log.debug("$%x ROL %s value $%x << 1 | Carry = $%x" % (
    #     self.program_counter, register.name, old_value, rotated))
    register.set(rotated)
Rotate accumulator left
Below is the the instruction that describes the task: ### Input: Rotate accumulator left ### Response: def instruction_ROL_register(self, opcode, register): """ Rotate accumulator left """ a = register.value r = self.ROL(a) # log.debug("$%x ROL %s value $%x << 1 | Carry = $%x" % ( # self.program_counter, # register.name, a, r # )) register.set(r)
def area_fraction_dict(self): """ Returns: (dict): {hkl: area_hkl/total area on wulff} """ return {hkl: self.miller_area_dict[hkl] / self.surface_area for hkl in self.miller_area_dict.keys()}
Returns: (dict): {hkl: area_hkl/total area on wulff}
Below is the the instruction that describes the task: ### Input: Returns: (dict): {hkl: area_hkl/total area on wulff} ### Response: def area_fraction_dict(self): """ Returns: (dict): {hkl: area_hkl/total area on wulff} """ return {hkl: self.miller_area_dict[hkl] / self.surface_area for hkl in self.miller_area_dict.keys()}
def _get_sorted_methods(self, methods): """Get a copy of 'methods' sorted the way they would be on the live server. Args: methods: JSON configuration of an API's methods. Returns: The same configuration with the methods sorted based on what order they'll be checked by the server. """ if not methods: return methods # Comparison function we'll use to sort the methods: def _sorted_methods_comparison(method_info1, method_info2): """Sort method info by path and http_method. Args: method_info1: Method name and info for the first method to compare. method_info2: Method name and info for the method to compare to. Returns: Negative if the first method should come first, positive if the first method should come after the second. Zero if they're equivalent. """ def _score_path(path): """Calculate the score for this path, used for comparisons. Higher scores have priority, and if scores are equal, the path text is sorted alphabetically. Scores are based on the number and location of the constant parts of the path. The server has some special handling for variables with regexes, which we don't handle here. Args: path: The request path that we're calculating a score for. Returns: The score for the given path. """ score = 0 parts = path.split('/') for part in parts: score <<= 1 if not part or part[0] != '{': # Found a constant. score += 1 # Shift by 31 instead of 32 because some (!) versions of Python like # to convert the int to a long if we shift by 32, and the sorted() # function that uses this blows up if it receives anything but an int. score <<= 31 - len(parts) return score # Higher path scores come first. path_score1 = _score_path(method_info1[1].get('path', '')) path_score2 = _score_path(method_info2[1].get('path', '')) if path_score1 != path_score2: return path_score2 - path_score1 # Compare by path text next, sorted alphabetically. 
path_result = cmp(method_info1[1].get('path', ''), method_info2[1].get('path', '')) if path_result != 0: return path_result # All else being equal, sort by HTTP method. method_result = cmp(method_info1[1].get('httpMethod', ''), method_info2[1].get('httpMethod', '')) return method_result return sorted(methods.items(), _sorted_methods_comparison)
Get a copy of 'methods' sorted the way they would be on the live server. Args: methods: JSON configuration of an API's methods. Returns: The same configuration with the methods sorted based on what order they'll be checked by the server.
Below is the the instruction that describes the task: ### Input: Get a copy of 'methods' sorted the way they would be on the live server. Args: methods: JSON configuration of an API's methods. Returns: The same configuration with the methods sorted based on what order they'll be checked by the server. ### Response: def _get_sorted_methods(self, methods): """Get a copy of 'methods' sorted the way they would be on the live server. Args: methods: JSON configuration of an API's methods. Returns: The same configuration with the methods sorted based on what order they'll be checked by the server. """ if not methods: return methods # Comparison function we'll use to sort the methods: def _sorted_methods_comparison(method_info1, method_info2): """Sort method info by path and http_method. Args: method_info1: Method name and info for the first method to compare. method_info2: Method name and info for the method to compare to. Returns: Negative if the first method should come first, positive if the first method should come after the second. Zero if they're equivalent. """ def _score_path(path): """Calculate the score for this path, used for comparisons. Higher scores have priority, and if scores are equal, the path text is sorted alphabetically. Scores are based on the number and location of the constant parts of the path. The server has some special handling for variables with regexes, which we don't handle here. Args: path: The request path that we're calculating a score for. Returns: The score for the given path. """ score = 0 parts = path.split('/') for part in parts: score <<= 1 if not part or part[0] != '{': # Found a constant. score += 1 # Shift by 31 instead of 32 because some (!) versions of Python like # to convert the int to a long if we shift by 32, and the sorted() # function that uses this blows up if it receives anything but an int. score <<= 31 - len(parts) return score # Higher path scores come first. 
path_score1 = _score_path(method_info1[1].get('path', '')) path_score2 = _score_path(method_info2[1].get('path', '')) if path_score1 != path_score2: return path_score2 - path_score1 # Compare by path text next, sorted alphabetically. path_result = cmp(method_info1[1].get('path', ''), method_info2[1].get('path', '')) if path_result != 0: return path_result # All else being equal, sort by HTTP method. method_result = cmp(method_info1[1].get('httpMethod', ''), method_info2[1].get('httpMethod', '')) return method_result return sorted(methods.items(), _sorted_methods_comparison)
def _buffer_decode_step(self, input, errors, final): """ There are three possibilities for each decoding step: - Decode as much real UTF-8 as possible. - Decode a six-byte CESU-8 sequence at the current position. - Decode a Java-style null at the current position. This method figures out which step is appropriate, and does it. """ # Get a reference to the superclass method that we'll be using for # most of the real work. sup = UTF8IncrementalDecoder._buffer_decode # Find the next byte position that indicates a variant of UTF-8. match = SPECIAL_BYTES_RE.search(input) if match is None: return sup(input, errors, final) cutoff = match.start() if cutoff > 0: return sup(input[:cutoff], errors, True) # Some byte sequence that we intend to handle specially matches # at the beginning of the input. if input.startswith(b'\xc0'): if len(input) > 1: # Decode the two-byte sequence 0xc0 0x80. return '\u0000', 2 else: if final: # We hit the end of the stream. Let the superclass method # handle it. return sup(input, errors, True) else: # Wait to see another byte. return '', 0 else: # Decode a possible six-byte sequence starting with 0xed. return self._buffer_decode_surrogates(sup, input, errors, final)
There are three possibilities for each decoding step: - Decode as much real UTF-8 as possible. - Decode a six-byte CESU-8 sequence at the current position. - Decode a Java-style null at the current position. This method figures out which step is appropriate, and does it.
Below is the the instruction that describes the task: ### Input: There are three possibilities for each decoding step: - Decode as much real UTF-8 as possible. - Decode a six-byte CESU-8 sequence at the current position. - Decode a Java-style null at the current position. This method figures out which step is appropriate, and does it. ### Response: def _buffer_decode_step(self, input, errors, final): """ There are three possibilities for each decoding step: - Decode as much real UTF-8 as possible. - Decode a six-byte CESU-8 sequence at the current position. - Decode a Java-style null at the current position. This method figures out which step is appropriate, and does it. """ # Get a reference to the superclass method that we'll be using for # most of the real work. sup = UTF8IncrementalDecoder._buffer_decode # Find the next byte position that indicates a variant of UTF-8. match = SPECIAL_BYTES_RE.search(input) if match is None: return sup(input, errors, final) cutoff = match.start() if cutoff > 0: return sup(input[:cutoff], errors, True) # Some byte sequence that we intend to handle specially matches # at the beginning of the input. if input.startswith(b'\xc0'): if len(input) > 1: # Decode the two-byte sequence 0xc0 0x80. return '\u0000', 2 else: if final: # We hit the end of the stream. Let the superclass method # handle it. return sup(input, errors, True) else: # Wait to see another byte. return '', 0 else: # Decode a possible six-byte sequence starting with 0xed. return self._buffer_decode_surrogates(sup, input, errors, final)
def create( cls, model, parent = None, uifile = '', commit = True ): """ Prompts the user to create a new record for the inputed table. :param model | <subclass of orb.Table> parent | <QWidget> :return <orb.Table> || None/ | instance of the inputed table class """ # create the dialog dlg = QDialog(parent) dlg.setWindowTitle('Create %s' % model.schema().name()) # create the widget cls = model.schema().property('widgetClass', cls) widget = cls(dlg) if ( uifile ): widget.setUiFile(uifile) widget.setModel(model) widget.layout().setContentsMargins(0, 0, 0, 0) # create buttons opts = QDialogButtonBox.Save | QDialogButtonBox.Cancel btns = QDialogButtonBox(opts, Qt.Horizontal, dlg) # create layout layout = QVBoxLayout() layout.addWidget(widget) layout.addWidget(btns) dlg.setLayout(layout) dlg.adjustSize() # create connections btns.accepted.connect(widget.save) btns.rejected.connect(dlg.reject) widget.saved.connect(dlg.accept) if ( dlg.exec_() ): record = widget.record() if ( commit ): record.commit() return record return None
Prompts the user to create a new record for the inputed table. :param model | <subclass of orb.Table> parent | <QWidget> :return <orb.Table> || None/ | instance of the inputed table class
Below is the the instruction that describes the task: ### Input: Prompts the user to create a new record for the inputed table. :param model | <subclass of orb.Table> parent | <QWidget> :return <orb.Table> || None/ | instance of the inputed table class ### Response: def create( cls, model, parent = None, uifile = '', commit = True ): """ Prompts the user to create a new record for the inputed table. :param model | <subclass of orb.Table> parent | <QWidget> :return <orb.Table> || None/ | instance of the inputed table class """ # create the dialog dlg = QDialog(parent) dlg.setWindowTitle('Create %s' % model.schema().name()) # create the widget cls = model.schema().property('widgetClass', cls) widget = cls(dlg) if ( uifile ): widget.setUiFile(uifile) widget.setModel(model) widget.layout().setContentsMargins(0, 0, 0, 0) # create buttons opts = QDialogButtonBox.Save | QDialogButtonBox.Cancel btns = QDialogButtonBox(opts, Qt.Horizontal, dlg) # create layout layout = QVBoxLayout() layout.addWidget(widget) layout.addWidget(btns) dlg.setLayout(layout) dlg.adjustSize() # create connections btns.accepted.connect(widget.save) btns.rejected.connect(dlg.reject) widget.saved.connect(dlg.accept) if ( dlg.exec_() ): record = widget.record() if ( commit ): record.commit() return record return None
def stop(self): """ Stop services and requestors and then connection. :return: self """ LOGGER.debug("rabbitmq.Driver.stop") for requester in self.requester_registry: requester.stop() self.requester_registry.clear() for service in self.services_registry: if service.is_started: service.stop() self.services_registry.clear() pykka.ActorRegistry.stop_all() return self
Stop services and requestors and then connection. :return: self
Below is the the instruction that describes the task: ### Input: Stop services and requestors and then connection. :return: self ### Response: def stop(self): """ Stop services and requestors and then connection. :return: self """ LOGGER.debug("rabbitmq.Driver.stop") for requester in self.requester_registry: requester.stop() self.requester_registry.clear() for service in self.services_registry: if service.is_started: service.stop() self.services_registry.clear() pykka.ActorRegistry.stop_all() return self
def node_path_transforms(self, node): """Return the list of transforms along the path to another node. The transforms are listed in reverse order, such that the last transform should be applied first when mapping from this node to the other. Parameters ---------- node : instance of Node The other node. Returns ------- transforms : list A list of Transform instances. """ a, b = self.node_path(node) return ([n.transform for n in a[:-1]] + [n.transform.inverse for n in b])[::-1]
Return the list of transforms along the path to another node. The transforms are listed in reverse order, such that the last transform should be applied first when mapping from this node to the other. Parameters ---------- node : instance of Node The other node. Returns ------- transforms : list A list of Transform instances.
Below is the the instruction that describes the task: ### Input: Return the list of transforms along the path to another node. The transforms are listed in reverse order, such that the last transform should be applied first when mapping from this node to the other. Parameters ---------- node : instance of Node The other node. Returns ------- transforms : list A list of Transform instances. ### Response: def node_path_transforms(self, node): """Return the list of transforms along the path to another node. The transforms are listed in reverse order, such that the last transform should be applied first when mapping from this node to the other. Parameters ---------- node : instance of Node The other node. Returns ------- transforms : list A list of Transform instances. """ a, b = self.node_path(node) return ([n.transform for n in a[:-1]] + [n.transform.inverse for n in b])[::-1]
def loop_template_list(loop_positions, instance, instance_type, default_template, registry): """ Build a list of templates from a position within a loop and a registry of templates. """ templates = [] local_loop_position = loop_positions[1] global_loop_position = loop_positions[0] instance_string = slugify(str(instance)) for key in ['%s-%s' % (instance_type, instance_string), instance_string, instance_type, 'default']: try: templates.append(registry[key][global_loop_position]) except KeyError: pass templates.append( append_position(default_template, global_loop_position, '-')) templates.append( append_position(default_template, local_loop_position, '_')) templates.append(default_template) return templates
Build a list of templates from a position within a loop and a registry of templates.
Below is the the instruction that describes the task: ### Input: Build a list of templates from a position within a loop and a registry of templates. ### Response: def loop_template_list(loop_positions, instance, instance_type, default_template, registry): """ Build a list of templates from a position within a loop and a registry of templates. """ templates = [] local_loop_position = loop_positions[1] global_loop_position = loop_positions[0] instance_string = slugify(str(instance)) for key in ['%s-%s' % (instance_type, instance_string), instance_string, instance_type, 'default']: try: templates.append(registry[key][global_loop_position]) except KeyError: pass templates.append( append_position(default_template, global_loop_position, '-')) templates.append( append_position(default_template, local_loop_position, '_')) templates.append(default_template) return templates
def list(self, **params): """ Retrieve visit outcomes Returns Visit Outcomes, according to the parameters provided :calls: ``get /visit_outcomes`` :param dict params: (optional) Search options. :return: List of dictionaries that support attriubte-style access, which represent collection of VisitOutcomes. :rtype: list """ _, _, visit_outcomes = self.http_client.get("/visit_outcomes", params=params) return visit_outcomes
Retrieve visit outcomes Returns Visit Outcomes, according to the parameters provided :calls: ``get /visit_outcomes`` :param dict params: (optional) Search options. :return: List of dictionaries that support attriubte-style access, which represent collection of VisitOutcomes. :rtype: list
Below is the the instruction that describes the task: ### Input: Retrieve visit outcomes Returns Visit Outcomes, according to the parameters provided :calls: ``get /visit_outcomes`` :param dict params: (optional) Search options. :return: List of dictionaries that support attriubte-style access, which represent collection of VisitOutcomes. :rtype: list ### Response: def list(self, **params): """ Retrieve visit outcomes Returns Visit Outcomes, according to the parameters provided :calls: ``get /visit_outcomes`` :param dict params: (optional) Search options. :return: List of dictionaries that support attriubte-style access, which represent collection of VisitOutcomes. :rtype: list """ _, _, visit_outcomes = self.http_client.get("/visit_outcomes", params=params) return visit_outcomes
def get_neurommsig_scores(graph: BELGraph, genes: List[Gene], annotation: str = 'Subgraph', ora_weight: Optional[float] = None, hub_weight: Optional[float] = None, top_percent: Optional[float] = None, topology_weight: Optional[float] = None, preprocess: bool = False ) -> Optional[Mapping[str, float]]: """Preprocess the graph, stratify by the given annotation, then run the NeuroMMSig algorithm on each. :param graph: A BEL graph :param genes: A list of gene nodes :param annotation: The annotation to use to stratify the graph to subgraphs :param ora_weight: The relative weight of the over-enrichment analysis score from :py:func:`neurommsig_gene_ora`. Defaults to 1.0. :param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`. Defaults to 1.0. :param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05). :param topology_weight: The relative weight of the topolgical analysis core from :py:func:`neurommsig_topology`. Defaults to 1.0. :param preprocess: If true, preprocess the graph. :return: A dictionary from {annotation value: NeuroMMSig composite score} Pre-processing steps: 1. Infer the central dogma with :func:`` 2. Collapse all proteins, RNAs and miRNAs to genes with :func:`` 3. Collapse variants to genes with :func:`` """ if preprocess: graph = neurommsig_graph_preprocessor.run(graph) if not any(gene in graph for gene in genes): logger.debug('no genes mapping to graph') return subgraphs = get_subgraphs_by_annotation(graph, annotation=annotation) return get_neurommsig_scores_prestratified( subgraphs=subgraphs, genes=genes, ora_weight=ora_weight, hub_weight=hub_weight, top_percent=top_percent, topology_weight=topology_weight, )
Preprocess the graph, stratify by the given annotation, then run the NeuroMMSig algorithm on each. :param graph: A BEL graph :param genes: A list of gene nodes :param annotation: The annotation to use to stratify the graph to subgraphs :param ora_weight: The relative weight of the over-enrichment analysis score from :py:func:`neurommsig_gene_ora`. Defaults to 1.0. :param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`. Defaults to 1.0. :param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05). :param topology_weight: The relative weight of the topolgical analysis core from :py:func:`neurommsig_topology`. Defaults to 1.0. :param preprocess: If true, preprocess the graph. :return: A dictionary from {annotation value: NeuroMMSig composite score} Pre-processing steps: 1. Infer the central dogma with :func:`` 2. Collapse all proteins, RNAs and miRNAs to genes with :func:`` 3. Collapse variants to genes with :func:``
Below is the the instruction that describes the task: ### Input: Preprocess the graph, stratify by the given annotation, then run the NeuroMMSig algorithm on each. :param graph: A BEL graph :param genes: A list of gene nodes :param annotation: The annotation to use to stratify the graph to subgraphs :param ora_weight: The relative weight of the over-enrichment analysis score from :py:func:`neurommsig_gene_ora`. Defaults to 1.0. :param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`. Defaults to 1.0. :param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05). :param topology_weight: The relative weight of the topolgical analysis core from :py:func:`neurommsig_topology`. Defaults to 1.0. :param preprocess: If true, preprocess the graph. :return: A dictionary from {annotation value: NeuroMMSig composite score} Pre-processing steps: 1. Infer the central dogma with :func:`` 2. Collapse all proteins, RNAs and miRNAs to genes with :func:`` 3. Collapse variants to genes with :func:`` ### Response: def get_neurommsig_scores(graph: BELGraph, genes: List[Gene], annotation: str = 'Subgraph', ora_weight: Optional[float] = None, hub_weight: Optional[float] = None, top_percent: Optional[float] = None, topology_weight: Optional[float] = None, preprocess: bool = False ) -> Optional[Mapping[str, float]]: """Preprocess the graph, stratify by the given annotation, then run the NeuroMMSig algorithm on each. :param graph: A BEL graph :param genes: A list of gene nodes :param annotation: The annotation to use to stratify the graph to subgraphs :param ora_weight: The relative weight of the over-enrichment analysis score from :py:func:`neurommsig_gene_ora`. Defaults to 1.0. :param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`. Defaults to 1.0. :param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05). 
:param topology_weight: The relative weight of the topolgical analysis core from :py:func:`neurommsig_topology`. Defaults to 1.0. :param preprocess: If true, preprocess the graph. :return: A dictionary from {annotation value: NeuroMMSig composite score} Pre-processing steps: 1. Infer the central dogma with :func:`` 2. Collapse all proteins, RNAs and miRNAs to genes with :func:`` 3. Collapse variants to genes with :func:`` """ if preprocess: graph = neurommsig_graph_preprocessor.run(graph) if not any(gene in graph for gene in genes): logger.debug('no genes mapping to graph') return subgraphs = get_subgraphs_by_annotation(graph, annotation=annotation) return get_neurommsig_scores_prestratified( subgraphs=subgraphs, genes=genes, ora_weight=ora_weight, hub_weight=hub_weight, top_percent=top_percent, topology_weight=topology_weight, )
def email_confirm(request, confirmation_key, template_name='accounts/email_confirm_fail.html', success_url=None, extra_context=None): """ Confirms an email address with a confirmation key. Confirms a new email address by running :func:`User.objects.confirm_email` method. If the method returns an :class:`User` the user will have his new e-mail address set and redirected to ``success_url``. If no ``User`` is returned the user will be represented with a fail message from ``template_name``. :param confirmation_key: String with a SHA1 representing the confirmation key used to verify a new email address. :param template_name: String containing the template name which should be rendered when confirmation fails. When confirmation is successful, no template is needed because the user will be redirected to ``success_url``. :param success_url: String containing the URL which is redirected to after a successful confirmation. Supplied argument must be able to be rendered by ``reverse`` function. :param extra_context: Dictionary of variables that are passed on to the template supplied by ``template_name``. """ user = AccountsSignup.objects.confirm_email(confirmation_key) if user: if accounts_settings.ACCOUNTS_USE_MESSAGES: messages.success(request, _('Your email address has been changed.'), fail_silently=True) if success_url: redirect_to = success_url else: redirect_to = reverse('accounts_email_confirm_complete', kwargs={'username': user.username}) return redirect(redirect_to) else: if not extra_context: extra_context = dict() return ExtraContextTemplateView.as_view(template_name=template_name, extra_context=extra_context)(request)
Confirms an email address with a confirmation key. Confirms a new email address by running :func:`User.objects.confirm_email` method. If the method returns an :class:`User` the user will have his new e-mail address set and redirected to ``success_url``. If no ``User`` is returned the user will be represented with a fail message from ``template_name``. :param confirmation_key: String with a SHA1 representing the confirmation key used to verify a new email address. :param template_name: String containing the template name which should be rendered when confirmation fails. When confirmation is successful, no template is needed because the user will be redirected to ``success_url``. :param success_url: String containing the URL which is redirected to after a successful confirmation. Supplied argument must be able to be rendered by ``reverse`` function. :param extra_context: Dictionary of variables that are passed on to the template supplied by ``template_name``.
Below is the the instruction that describes the task: ### Input: Confirms an email address with a confirmation key. Confirms a new email address by running :func:`User.objects.confirm_email` method. If the method returns an :class:`User` the user will have his new e-mail address set and redirected to ``success_url``. If no ``User`` is returned the user will be represented with a fail message from ``template_name``. :param confirmation_key: String with a SHA1 representing the confirmation key used to verify a new email address. :param template_name: String containing the template name which should be rendered when confirmation fails. When confirmation is successful, no template is needed because the user will be redirected to ``success_url``. :param success_url: String containing the URL which is redirected to after a successful confirmation. Supplied argument must be able to be rendered by ``reverse`` function. :param extra_context: Dictionary of variables that are passed on to the template supplied by ``template_name``. ### Response: def email_confirm(request, confirmation_key, template_name='accounts/email_confirm_fail.html', success_url=None, extra_context=None): """ Confirms an email address with a confirmation key. Confirms a new email address by running :func:`User.objects.confirm_email` method. If the method returns an :class:`User` the user will have his new e-mail address set and redirected to ``success_url``. If no ``User`` is returned the user will be represented with a fail message from ``template_name``. :param confirmation_key: String with a SHA1 representing the confirmation key used to verify a new email address. :param template_name: String containing the template name which should be rendered when confirmation fails. When confirmation is successful, no template is needed because the user will be redirected to ``success_url``. :param success_url: String containing the URL which is redirected to after a successful confirmation. 
Supplied argument must be able to be rendered by ``reverse`` function. :param extra_context: Dictionary of variables that are passed on to the template supplied by ``template_name``. """ user = AccountsSignup.objects.confirm_email(confirmation_key) if user: if accounts_settings.ACCOUNTS_USE_MESSAGES: messages.success(request, _('Your email address has been changed.'), fail_silently=True) if success_url: redirect_to = success_url else: redirect_to = reverse('accounts_email_confirm_complete', kwargs={'username': user.username}) return redirect(redirect_to) else: if not extra_context: extra_context = dict() return ExtraContextTemplateView.as_view(template_name=template_name, extra_context=extra_context)(request)
def func_attr(f, attr): """ Helper function to get the attribute of a function like, name, code, defaults across Python 2.x and 3.x """ if hasattr(f, 'func_%s' % attr): return getattr(f, 'func_%s' % attr) elif hasattr(f, '__%s__' % attr): return getattr(f, '__%s__' % attr) else: raise ValueError('Object %s has no attr' % (str(f), attr))
Helper function to get the attribute of a function like, name, code, defaults across Python 2.x and 3.x
Below is the the instruction that describes the task: ### Input: Helper function to get the attribute of a function like, name, code, defaults across Python 2.x and 3.x ### Response: def func_attr(f, attr): """ Helper function to get the attribute of a function like, name, code, defaults across Python 2.x and 3.x """ if hasattr(f, 'func_%s' % attr): return getattr(f, 'func_%s' % attr) elif hasattr(f, '__%s__' % attr): return getattr(f, '__%s__' % attr) else: raise ValueError('Object %s has no attr' % (str(f), attr))
async def login( host: str, password: str, websession: ClientSession, *, port: int = 8080, ssl: bool = True, request_timeout: int = DEFAULT_TIMEOUT) -> Controller: """Authenticate against a RainMachine device.""" print('regenmaschine.client.login() is deprecated; see documentation!') client = Client(websession, request_timeout) await client.load_local(host, password, port, ssl) return next(iter(client.controllers.values()))
Authenticate against a RainMachine device.
Below is the the instruction that describes the task: ### Input: Authenticate against a RainMachine device. ### Response: async def login( host: str, password: str, websession: ClientSession, *, port: int = 8080, ssl: bool = True, request_timeout: int = DEFAULT_TIMEOUT) -> Controller: """Authenticate against a RainMachine device.""" print('regenmaschine.client.login() is deprecated; see documentation!') client = Client(websession, request_timeout) await client.load_local(host, password, port, ssl) return next(iter(client.controllers.values()))
def handle_connection(stream): ''' Handle a connection. The server operates a request/response cycle, so it performs a synchronous loop: 1) Read data from network into wsproto 2) Get next wsproto event 3) Handle event 4) Send data from wsproto to network :param stream: a socket stream ''' ws = WSConnection(ConnectionType.SERVER) # events is a generator that yields websocket event objects. Usually you # would say `for event in ws.events()`, but the synchronous nature of this # server requires us to use next(event) instead so that we can interleave # the network I/O. events = ws.events() running = True while running: # 1) Read data from network in_data = stream.recv(RECEIVE_BYTES) print('Received {} bytes'.format(len(in_data))) ws.receive_data(in_data) # 2) Get next wsproto event try: event = next(events) except StopIteration: print('Client connection dropped unexpectedly') return # 3) Handle event if isinstance(event, Request): # Negotiate new WebSocket connection print('Accepting WebSocket upgrade') out_data = ws.send(AcceptConnection()) elif isinstance(event, CloseConnection): # Print log message and break out print('Connection closed: code={}/{} reason={}'.format( event.code.value, event.code.name, event.reason)) out_data = ws.send(event.response()) running = False elif isinstance(event, TextMessage): # Reverse text and send it back to wsproto print('Received request and sending response') out_data = ws.send(Message(data=event.data[::-1])) elif isinstance(event, Ping): # wsproto handles ping events for you by placing a pong frame in # the outgoing buffer. You should not call pong() unless you want to # send an unsolicited pong frame. print('Received ping and sending pong') out_data = ws.send(event.response()) else: print('Unknown event: {!r}'.format(event)) # 4) Send data from wsproto to network print('Sending {} bytes'.format(len(out_data))) stream.send(out_data)
Handle a connection. The server operates a request/response cycle, so it performs a synchronous loop: 1) Read data from network into wsproto 2) Get next wsproto event 3) Handle event 4) Send data from wsproto to network :param stream: a socket stream
Below is the the instruction that describes the task: ### Input: Handle a connection. The server operates a request/response cycle, so it performs a synchronous loop: 1) Read data from network into wsproto 2) Get next wsproto event 3) Handle event 4) Send data from wsproto to network :param stream: a socket stream ### Response: def handle_connection(stream): ''' Handle a connection. The server operates a request/response cycle, so it performs a synchronous loop: 1) Read data from network into wsproto 2) Get next wsproto event 3) Handle event 4) Send data from wsproto to network :param stream: a socket stream ''' ws = WSConnection(ConnectionType.SERVER) # events is a generator that yields websocket event objects. Usually you # would say `for event in ws.events()`, but the synchronous nature of this # server requires us to use next(event) instead so that we can interleave # the network I/O. events = ws.events() running = True while running: # 1) Read data from network in_data = stream.recv(RECEIVE_BYTES) print('Received {} bytes'.format(len(in_data))) ws.receive_data(in_data) # 2) Get next wsproto event try: event = next(events) except StopIteration: print('Client connection dropped unexpectedly') return # 3) Handle event if isinstance(event, Request): # Negotiate new WebSocket connection print('Accepting WebSocket upgrade') out_data = ws.send(AcceptConnection()) elif isinstance(event, CloseConnection): # Print log message and break out print('Connection closed: code={}/{} reason={}'.format( event.code.value, event.code.name, event.reason)) out_data = ws.send(event.response()) running = False elif isinstance(event, TextMessage): # Reverse text and send it back to wsproto print('Received request and sending response') out_data = ws.send(Message(data=event.data[::-1])) elif isinstance(event, Ping): # wsproto handles ping events for you by placing a pong frame in # the outgoing buffer. You should not call pong() unless you want to # send an unsolicited pong frame. 
print('Received ping and sending pong') out_data = ws.send(event.response()) else: print('Unknown event: {!r}'.format(event)) # 4) Send data from wsproto to network print('Sending {} bytes'.format(len(out_data))) stream.send(out_data)
def unpack_rows(self, parameters_metadata, connection): """Unpack output or input/output parameters from the stored procedure call result :parameters_metadata: a stored procedure parameters metadata :returns: parameter values """ values = [] for param in parameters_metadata: # Unpack OUT or INOUT parameters' values if param.iotype != parameter_direction.IN: values.append( by_type_code[param.datatype].from_resultset(self.payload) ) yield tuple(values)
Unpack output or input/output parameters from the stored procedure call result :parameters_metadata: a stored procedure parameters metadata :returns: parameter values
Below is the the instruction that describes the task: ### Input: Unpack output or input/output parameters from the stored procedure call result :parameters_metadata: a stored procedure parameters metadata :returns: parameter values ### Response: def unpack_rows(self, parameters_metadata, connection): """Unpack output or input/output parameters from the stored procedure call result :parameters_metadata: a stored procedure parameters metadata :returns: parameter values """ values = [] for param in parameters_metadata: # Unpack OUT or INOUT parameters' values if param.iotype != parameter_direction.IN: values.append( by_type_code[param.datatype].from_resultset(self.payload) ) yield tuple(values)
def hexagon_coordinates(i, j, k): """ Computes coordinates of the constituent hexagons of a hexagonal heatmap. Parameters ---------- i, j, k: enumeration of the desired hexagon Returns ------- A numpy array of coordinates of the hexagon (unprojected) """ signature = "" for x in [i, j, k]: if x == 0: signature += "0" else: signature += "1" deltas = hexagon_deltas[signature] center = numpy.array([i, j, k]) return numpy.array([center + x for x in deltas])
Computes coordinates of the constituent hexagons of a hexagonal heatmap. Parameters ---------- i, j, k: enumeration of the desired hexagon Returns ------- A numpy array of coordinates of the hexagon (unprojected)
Below is the the instruction that describes the task: ### Input: Computes coordinates of the constituent hexagons of a hexagonal heatmap. Parameters ---------- i, j, k: enumeration of the desired hexagon Returns ------- A numpy array of coordinates of the hexagon (unprojected) ### Response: def hexagon_coordinates(i, j, k): """ Computes coordinates of the constituent hexagons of a hexagonal heatmap. Parameters ---------- i, j, k: enumeration of the desired hexagon Returns ------- A numpy array of coordinates of the hexagon (unprojected) """ signature = "" for x in [i, j, k]: if x == 0: signature += "0" else: signature += "1" deltas = hexagon_deltas[signature] center = numpy.array([i, j, k]) return numpy.array([center + x for x in deltas])
def read_archive(self,header,prepend=None): """ Extract a copy of WCS keywords from an open file header, if they have already been created and remember the prefix used for those keywords. Otherwise, setup the current WCS keywords as the archive values. """ # Start by looking for the any backup WCS keywords to # determine whether archived values are present and to set # the prefix used. _prefix = None _archive = False if header is not None: for kw in header.items(): if kw[0][1:] in self.wcstrans.keys(): _prefix = kw[0][0] _archive = True break if not _archive: self.archive(prepend=prepend) return # We have archive keywords and a defined prefix # Go through and append them to self.backup if _prefix is not None: self.prepend = _prefix else: self.prepend = DEFAULT_PREFIX for key in self.wcstrans.keys(): _archive_key = self._buildNewKeyname(key,_prefix) if key!= 'pixel scale': if _archive_key in header: self.orig_wcs[_archive_key] = header[_archive_key] else: self.orig_wcs[_archive_key] = header[key] self.backup[key] = _archive_key self.revert[_archive_key] = key # Establish plate scale value _cd11str = self.prepend+'CD1_1' _cd21str = self.prepend+'CD2_1' pscale = self.compute_pscale(self.orig_wcs[_cd11str],self.orig_wcs[_cd21str]) _archive_key = self.prepend.lower()+'pscale' self.orig_wcs[_archive_key] = pscale self.backup['pixel scale'] = _archive_key self.revert[_archive_key] = 'pixel scale' # Setup keyword to record when these keywords were backed up. if 'WCSCDATE' in header: self.orig_wcs['WCSCDATE'] = header['WCSCDATE'] else: self.orig_wcs['WCSCDATE'] = fileutil.getLTime() self.backup['WCSCDATE'] = 'WCSCDATE' self.revert['WCSCDATE'] = 'WCSCDATE'
Extract a copy of WCS keywords from an open file header, if they have already been created and remember the prefix used for those keywords. Otherwise, setup the current WCS keywords as the archive values.
Below is the the instruction that describes the task: ### Input: Extract a copy of WCS keywords from an open file header, if they have already been created and remember the prefix used for those keywords. Otherwise, setup the current WCS keywords as the archive values. ### Response: def read_archive(self,header,prepend=None): """ Extract a copy of WCS keywords from an open file header, if they have already been created and remember the prefix used for those keywords. Otherwise, setup the current WCS keywords as the archive values. """ # Start by looking for the any backup WCS keywords to # determine whether archived values are present and to set # the prefix used. _prefix = None _archive = False if header is not None: for kw in header.items(): if kw[0][1:] in self.wcstrans.keys(): _prefix = kw[0][0] _archive = True break if not _archive: self.archive(prepend=prepend) return # We have archive keywords and a defined prefix # Go through and append them to self.backup if _prefix is not None: self.prepend = _prefix else: self.prepend = DEFAULT_PREFIX for key in self.wcstrans.keys(): _archive_key = self._buildNewKeyname(key,_prefix) if key!= 'pixel scale': if _archive_key in header: self.orig_wcs[_archive_key] = header[_archive_key] else: self.orig_wcs[_archive_key] = header[key] self.backup[key] = _archive_key self.revert[_archive_key] = key # Establish plate scale value _cd11str = self.prepend+'CD1_1' _cd21str = self.prepend+'CD2_1' pscale = self.compute_pscale(self.orig_wcs[_cd11str],self.orig_wcs[_cd21str]) _archive_key = self.prepend.lower()+'pscale' self.orig_wcs[_archive_key] = pscale self.backup['pixel scale'] = _archive_key self.revert[_archive_key] = 'pixel scale' # Setup keyword to record when these keywords were backed up. if 'WCSCDATE' in header: self.orig_wcs['WCSCDATE'] = header['WCSCDATE'] else: self.orig_wcs['WCSCDATE'] = fileutil.getLTime() self.backup['WCSCDATE'] = 'WCSCDATE' self.revert['WCSCDATE'] = 'WCSCDATE'
def proposals(ctx, account): """ List proposals """ proposals = Proposals(account) t = PrettyTable( [ "id", "expiration", "proposer", "required approvals", "available approvals", "review period time", "proposal", ] ) t.align = "l" for proposal in proposals: if proposal.proposer: proposer = Account(proposal.proposer, peerplays_instance=ctx.peerplays)[ "name" ] else: proposer = "n/a" t.add_row( [ proposal["id"], proposal["expiration_time"], proposer, [ Account(x)["name"] for x in ( proposal["required_active_approvals"] + proposal["required_owner_approvals"] ) ], json.dumps( [Account(x)["name"] for x in proposal["available_active_approvals"]] + proposal["available_key_approvals"] + proposal["available_owner_approvals"], indent=1, ), proposal.get("review_period_time", None), json.dumps(proposal["proposed_transaction"], indent=4), ] ) click.echo(str(t))
List proposals
Below is the the instruction that describes the task: ### Input: List proposals ### Response: def proposals(ctx, account): """ List proposals """ proposals = Proposals(account) t = PrettyTable( [ "id", "expiration", "proposer", "required approvals", "available approvals", "review period time", "proposal", ] ) t.align = "l" for proposal in proposals: if proposal.proposer: proposer = Account(proposal.proposer, peerplays_instance=ctx.peerplays)[ "name" ] else: proposer = "n/a" t.add_row( [ proposal["id"], proposal["expiration_time"], proposer, [ Account(x)["name"] for x in ( proposal["required_active_approvals"] + proposal["required_owner_approvals"] ) ], json.dumps( [Account(x)["name"] for x in proposal["available_active_approvals"]] + proposal["available_key_approvals"] + proposal["available_owner_approvals"], indent=1, ), proposal.get("review_period_time", None), json.dumps(proposal["proposed_transaction"], indent=4), ] ) click.echo(str(t))
def inputs_valid(self, outputs=None): """Validates the Inputs in the Transaction against given Outputs. Note: Given a `CREATE` Transaction is passed, dummy values for Outputs are submitted for validation that evaluate parts of the validation-checks to `True`. Args: outputs (:obj:`list` of :class:`~bigchaindb.common. transaction.Output`): A list of Outputs to check the Inputs against. Returns: bool: If all Inputs are valid. """ if self.operation == Transaction.CREATE: # NOTE: Since in the case of a `CREATE`-transaction we do not have # to check for outputs, we're just submitting dummy # values to the actual method. This simplifies it's logic # greatly, as we do not have to check against `None` values. return self._inputs_valid(['dummyvalue' for _ in self.inputs]) elif self.operation == Transaction.TRANSFER: return self._inputs_valid([output.fulfillment.condition_uri for output in outputs]) else: allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS) raise TypeError('`operation` must be one of {}' .format(allowed_ops))
Validates the Inputs in the Transaction against given Outputs. Note: Given a `CREATE` Transaction is passed, dummy values for Outputs are submitted for validation that evaluate parts of the validation-checks to `True`. Args: outputs (:obj:`list` of :class:`~bigchaindb.common. transaction.Output`): A list of Outputs to check the Inputs against. Returns: bool: If all Inputs are valid.
Below is the the instruction that describes the task: ### Input: Validates the Inputs in the Transaction against given Outputs. Note: Given a `CREATE` Transaction is passed, dummy values for Outputs are submitted for validation that evaluate parts of the validation-checks to `True`. Args: outputs (:obj:`list` of :class:`~bigchaindb.common. transaction.Output`): A list of Outputs to check the Inputs against. Returns: bool: If all Inputs are valid. ### Response: def inputs_valid(self, outputs=None): """Validates the Inputs in the Transaction against given Outputs. Note: Given a `CREATE` Transaction is passed, dummy values for Outputs are submitted for validation that evaluate parts of the validation-checks to `True`. Args: outputs (:obj:`list` of :class:`~bigchaindb.common. transaction.Output`): A list of Outputs to check the Inputs against. Returns: bool: If all Inputs are valid. """ if self.operation == Transaction.CREATE: # NOTE: Since in the case of a `CREATE`-transaction we do not have # to check for outputs, we're just submitting dummy # values to the actual method. This simplifies it's logic # greatly, as we do not have to check against `None` values. return self._inputs_valid(['dummyvalue' for _ in self.inputs]) elif self.operation == Transaction.TRANSFER: return self._inputs_valid([output.fulfillment.condition_uri for output in outputs]) else: allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS) raise TypeError('`operation` must be one of {}' .format(allowed_ops))
def delete_reserved_ip_address(self, name): ''' Deletes a reserved IP address from the specified subscription. name: Required. Name of the reserved IP address. ''' _validate_not_none('name', name) return self._perform_delete(self._get_reserved_ip_path(name), as_async=True)
Deletes a reserved IP address from the specified subscription. name: Required. Name of the reserved IP address.
Below is the instruction that describes the task:
### Input:
Deletes a reserved IP address from the specified
subscription.

name:
    Required. Name of the reserved IP address.
### Response:
def delete_reserved_ip_address(self, name):
        '''
        Deletes a reserved IP address from the specified
        subscription.

        name:
            Required. Name of the reserved IP address.
        '''
        _validate_not_none('name', name)
        return self._perform_delete(self._get_reserved_ip_path(name),
                                    as_async=True)
def get_users(self, condensed=False): '''Grabs all users in the slack team This should should only be used for getting list of all users. Do not use it for searching users. Use get_user_info instead. Args: condensed (bool): if true triggers list condensing functionality Returns: dict: Dict of users in Slack team. See also: https://api.slack.com/methods/users.list ''' user_list = self.slack_client.api_call('users.list') if not user_list.get('ok'): return None if condensed: users = [{'id': item.get('id'), 'name': item.get('name'), 'display_name': item.get('profile').get('display_name')} for item in user_list.get('members')] return users else: return user_list
Grabs all users in the slack team This should should only be used for getting list of all users. Do not use it for searching users. Use get_user_info instead. Args: condensed (bool): if true triggers list condensing functionality Returns: dict: Dict of users in Slack team. See also: https://api.slack.com/methods/users.list
Below is the the instruction that describes the task: ### Input: Grabs all users in the slack team This should should only be used for getting list of all users. Do not use it for searching users. Use get_user_info instead. Args: condensed (bool): if true triggers list condensing functionality Returns: dict: Dict of users in Slack team. See also: https://api.slack.com/methods/users.list ### Response: def get_users(self, condensed=False): '''Grabs all users in the slack team This should should only be used for getting list of all users. Do not use it for searching users. Use get_user_info instead. Args: condensed (bool): if true triggers list condensing functionality Returns: dict: Dict of users in Slack team. See also: https://api.slack.com/methods/users.list ''' user_list = self.slack_client.api_call('users.list') if not user_list.get('ok'): return None if condensed: users = [{'id': item.get('id'), 'name': item.get('name'), 'display_name': item.get('profile').get('display_name')} for item in user_list.get('members')] return users else: return user_list
def xmlprint(element): """ pretty prints an ElementTree (or an Element of it), or the XML representation of a SaltDocument (or an element thereof, e.g. a node, edge, layer etc.) """ if isinstance(element, (etree._Element, etree._ElementTree)): print etree.tostring(element, pretty_print=True) else: if hasattr(element, 'xml'): print etree.tostring(element.xml, pretty_print=True)
pretty prints an ElementTree (or an Element of it), or the XML representation of a SaltDocument (or an element thereof, e.g. a node, edge, layer etc.)
Below is the instruction that describes the task:
### Input:
pretty prints an ElementTree (or an Element of it), or the XML
    representation of a SaltDocument (or an element thereof, e.g. a node,
    edge, layer etc.)
### Response:
def xmlprint(element):
    """
    pretty prints an ElementTree (or an Element of it), or the XML
    representation of a SaltDocument (or an element thereof, e.g. a node,
    edge, layer etc.)
    """
    if isinstance(element, (etree._Element, etree._ElementTree)):
        print etree.tostring(element, pretty_print=True)
    else:
        if hasattr(element, 'xml'):
            print etree.tostring(element.xml, pretty_print=True)
def get_attribute_values(self, att_name): """ Returns the values of attribute "att_name" of CPE Name. By default a only element in each part. :param string att_name: Attribute name to get :returns: List of attribute values :rtype: list :exception: ValueError - invalid attribute name """ lc = [] if not CPEComponent.is_valid_attribute(att_name): errmsg = "Invalid attribute name: {0}".format(att_name) raise ValueError(errmsg) for pk in CPE.CPE_PART_KEYS: elements = self.get(pk) for elem in elements: comp = elem.get(att_name) if isinstance(comp, CPEComponentAnyValue): value = CPEComponent2_3_FS.VALUE_ANY elif isinstance(comp, CPEComponentNotApplicable): value = CPEComponent2_3_FS.VALUE_NA else: value = comp.get_value() lc.append(value) return lc
Returns the values of attribute "att_name" of CPE Name. By default a only element in each part. :param string att_name: Attribute name to get :returns: List of attribute values :rtype: list :exception: ValueError - invalid attribute name
Below is the the instruction that describes the task: ### Input: Returns the values of attribute "att_name" of CPE Name. By default a only element in each part. :param string att_name: Attribute name to get :returns: List of attribute values :rtype: list :exception: ValueError - invalid attribute name ### Response: def get_attribute_values(self, att_name): """ Returns the values of attribute "att_name" of CPE Name. By default a only element in each part. :param string att_name: Attribute name to get :returns: List of attribute values :rtype: list :exception: ValueError - invalid attribute name """ lc = [] if not CPEComponent.is_valid_attribute(att_name): errmsg = "Invalid attribute name: {0}".format(att_name) raise ValueError(errmsg) for pk in CPE.CPE_PART_KEYS: elements = self.get(pk) for elem in elements: comp = elem.get(att_name) if isinstance(comp, CPEComponentAnyValue): value = CPEComponent2_3_FS.VALUE_ANY elif isinstance(comp, CPEComponentNotApplicable): value = CPEComponent2_3_FS.VALUE_NA else: value = comp.get_value() lc.append(value) return lc
def from_string(cls, token_string): """ `token_string` should be the string representation from the server. """ # unhexlify works fine with unicode input in everythin but pypy3, where it Raises "TypeError: 'str' does not support the buffer interface" if isinstance(token_string, six.text_type): token_string = token_string.encode('ascii') # The BOP stores a hex string return cls(unhexlify(token_string))
`token_string` should be the string representation from the server.
Below is the instruction that describes the task:
### Input:
`token_string` should be the string representation from the server.
### Response:
def from_string(cls, token_string):
        """
        `token_string` should be the string representation from the server.
        """
        # unhexlify works fine with unicode input in everythin but pypy3, where it Raises "TypeError: 'str' does not support the buffer interface"
        if isinstance(token_string, six.text_type):
            token_string = token_string.encode('ascii')
        # The BOP stores a hex string
        return cls(unhexlify(token_string))
def try_encode(field_encoders, entity_dict): """ Inner encoding and try return string from entity dictionary :param field_encoders: :param entity_dict: :return: """ result = '' for field_encoder in field_encoders: try: result += field_encoder.encode(entity_dict) except KeyError as e: return False return result
Inner encoding and try return string from entity dictionary :param field_encoders: :param entity_dict: :return:
Below is the instruction that describes the task:
### Input:
Inner encoding and try return string from entity dictionary
:param field_encoders:
:param entity_dict:
:return:
### Response:
def try_encode(field_encoders, entity_dict):
    """
    Inner encoding and try return string from entity dictionary
    :param field_encoders:
    :param entity_dict:
    :return:
    """
    result = ''
    for field_encoder in field_encoders:
        try:
            result += field_encoder.encode(entity_dict)
        except KeyError as e:
            return False

    return result
def enqueue_sync(self, func, *func_args): ''' Enqueue an arbitrary synchronous function. Deprecated: Use async version instead ''' worker = self.pick_sticky(0) # just pick first always args = (func,) + func_args coro = worker.enqueue(enums.Task.FUNC, args) asyncio.ensure_future(coro)
Enqueue an arbitrary synchronous function. Deprecated: Use async version instead
Below is the instruction that describes the task:
### Input:
Enqueue an arbitrary synchronous function.

Deprecated: Use async version instead
### Response:
def enqueue_sync(self, func, *func_args):
        ''' Enqueue an arbitrary synchronous function.

        Deprecated: Use async version instead
        '''
        worker = self.pick_sticky(0)  # just pick first always
        args = (func,) + func_args
        coro = worker.enqueue(enums.Task.FUNC, args)
        asyncio.ensure_future(coro)
def prune(self, minimum_word_frequency_percentage=1): """ Filter out words that occur less than minimum_word_frequency times. :param minimum_word_frequency_percentage: minimum frequency of words to keep """ pruned_resulting_documents = [] for document in self.resulting_documents: new_document = [] for word in document: if self.word_in_how_many_documents[word] >= minimum_word_frequency_percentage / 100. * len( self.resulting_documents): new_document.append(word) pruned_resulting_documents.append(new_document) self.resulting_documents = pruned_resulting_documents
Filter out words that occur less than minimum_word_frequency times. :param minimum_word_frequency_percentage: minimum frequency of words to keep
Below is the the instruction that describes the task: ### Input: Filter out words that occur less than minimum_word_frequency times. :param minimum_word_frequency_percentage: minimum frequency of words to keep ### Response: def prune(self, minimum_word_frequency_percentage=1): """ Filter out words that occur less than minimum_word_frequency times. :param minimum_word_frequency_percentage: minimum frequency of words to keep """ pruned_resulting_documents = [] for document in self.resulting_documents: new_document = [] for word in document: if self.word_in_how_many_documents[word] >= minimum_word_frequency_percentage / 100. * len( self.resulting_documents): new_document.append(word) pruned_resulting_documents.append(new_document) self.resulting_documents = pruned_resulting_documents
def parse_timemap_from_blocks(profile_block_list): """ Build a map from times to line_profile blocks """ prefix_list = [] timemap = ut.ddict(list) for ix in range(len(profile_block_list)): block = profile_block_list[ix] total_time = get_block_totaltime(block) # Blocks without time go at the front of sorted output if total_time is None: prefix_list.append(block) # Blocks that are not run are not appended to output elif total_time != 0: timemap[total_time].append(block) return prefix_list, timemap
Build a map from times to line_profile blocks
Below is the the instruction that describes the task: ### Input: Build a map from times to line_profile blocks ### Response: def parse_timemap_from_blocks(profile_block_list): """ Build a map from times to line_profile blocks """ prefix_list = [] timemap = ut.ddict(list) for ix in range(len(profile_block_list)): block = profile_block_list[ix] total_time = get_block_totaltime(block) # Blocks without time go at the front of sorted output if total_time is None: prefix_list.append(block) # Blocks that are not run are not appended to output elif total_time != 0: timemap[total_time].append(block) return prefix_list, timemap
def add_command_line_options(add_argument, use_short_options=True): """ :param add_argument: The add_argument method of an ArgParser. :param use_short_options: Whether or not to add short options. """ logger_args = ("--enable-logger",) credentials_args = ("--credentials",) if use_short_options: logger_args += ('-l',) credentials_args += ('-c',) add_argument(*logger_args, dest='enabled_loggers', action="append", default=[], help="Enable the specified logger.") add_argument(*credentials_args, dest='credential_files', action="append", default=[], help="Use the specified credentials module to update " "the values in okcupyd.settings.") add_argument('--echo', dest='echo', action='store_true', default=False, help="Echo SQL.")
:param add_argument: The add_argument method of an ArgParser. :param use_short_options: Whether or not to add short options.
Below is the the instruction that describes the task: ### Input: :param add_argument: The add_argument method of an ArgParser. :param use_short_options: Whether or not to add short options. ### Response: def add_command_line_options(add_argument, use_short_options=True): """ :param add_argument: The add_argument method of an ArgParser. :param use_short_options: Whether or not to add short options. """ logger_args = ("--enable-logger",) credentials_args = ("--credentials",) if use_short_options: logger_args += ('-l',) credentials_args += ('-c',) add_argument(*logger_args, dest='enabled_loggers', action="append", default=[], help="Enable the specified logger.") add_argument(*credentials_args, dest='credential_files', action="append", default=[], help="Use the specified credentials module to update " "the values in okcupyd.settings.") add_argument('--echo', dest='echo', action='store_true', default=False, help="Echo SQL.")
def endpoint_from_job(self, job_name, initial_instance_count, instance_type, deployment_image=None, name=None, role=None, wait=True, model_environment_vars=None, vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT, accelerator_type=None): """Create an ``Endpoint`` using the results of a successful training job. Specify the job name, Docker image containing the inference code, and hardware configuration to deploy the model. Internally the API, creates an Amazon SageMaker model (that describes the model artifacts and the Docker image containing inference code), endpoint configuration (describing the hardware to deploy for hosting the model), and creates an ``Endpoint`` (launches the EC2 instances and deploys the model on them). In response, the API returns the endpoint name to which you can send requests for inferences. Args: job_name (str): Name of the training job to deploy the results of. initial_instance_count (int): Minimum number of EC2 instances to launch. The actual number of active instances for an endpoint at any given time varies due to autoscaling. instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction, for example, 'ml.c4.xlarge'. deployment_image (str): The Docker image which defines the inference code to be used as the entry point for accepting prediction requests. If not specified, uses the image used for the training job. name (str): Name of the ``Endpoint`` to create. If not specified, uses the training job name. role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs that create Amazon SageMaker endpoints use this role to access training data and model artifacts. You must grant sufficient permissions to this role. wait (bool): Whether to wait for the endpoint deployment to complete before returning (default: True). model_environment_vars (dict[str, str]): Environment variables to set on the model container (default: None). 
vpc_config_override (dict[str, list[str]]): Overrides VpcConfig set on the model. Default: use VpcConfig from training job. * 'Subnets' (list[str]): List of subnet ids. * 'SecurityGroupIds' (list[str]): List of security group ids. accelerator_type (str): Type of Elastic Inference accelerator to attach to the instance. For example, 'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html Returns: str: Name of the ``Endpoint`` that is created. """ job_desc = self.sagemaker_client.describe_training_job(TrainingJobName=job_name) output_url = job_desc['ModelArtifacts']['S3ModelArtifacts'] deployment_image = deployment_image or job_desc['AlgorithmSpecification']['TrainingImage'] role = role or job_desc['RoleArn'] name = name or job_name vpc_config_override = _vpc_config_from_training_job(job_desc, vpc_config_override) return self.endpoint_from_model_data(model_s3_location=output_url, deployment_image=deployment_image, initial_instance_count=initial_instance_count, instance_type=instance_type, name=name, role=role, wait=wait, model_environment_vars=model_environment_vars, model_vpc_config=vpc_config_override, accelerator_type=accelerator_type)
Create an ``Endpoint`` using the results of a successful training job. Specify the job name, Docker image containing the inference code, and hardware configuration to deploy the model. Internally the API, creates an Amazon SageMaker model (that describes the model artifacts and the Docker image containing inference code), endpoint configuration (describing the hardware to deploy for hosting the model), and creates an ``Endpoint`` (launches the EC2 instances and deploys the model on them). In response, the API returns the endpoint name to which you can send requests for inferences. Args: job_name (str): Name of the training job to deploy the results of. initial_instance_count (int): Minimum number of EC2 instances to launch. The actual number of active instances for an endpoint at any given time varies due to autoscaling. instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction, for example, 'ml.c4.xlarge'. deployment_image (str): The Docker image which defines the inference code to be used as the entry point for accepting prediction requests. If not specified, uses the image used for the training job. name (str): Name of the ``Endpoint`` to create. If not specified, uses the training job name. role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs that create Amazon SageMaker endpoints use this role to access training data and model artifacts. You must grant sufficient permissions to this role. wait (bool): Whether to wait for the endpoint deployment to complete before returning (default: True). model_environment_vars (dict[str, str]): Environment variables to set on the model container (default: None). vpc_config_override (dict[str, list[str]]): Overrides VpcConfig set on the model. Default: use VpcConfig from training job. * 'Subnets' (list[str]): List of subnet ids. * 'SecurityGroupIds' (list[str]): List of security group ids. 
accelerator_type (str): Type of Elastic Inference accelerator to attach to the instance. For example, 'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html Returns: str: Name of the ``Endpoint`` that is created.
Below is the the instruction that describes the task: ### Input: Create an ``Endpoint`` using the results of a successful training job. Specify the job name, Docker image containing the inference code, and hardware configuration to deploy the model. Internally the API, creates an Amazon SageMaker model (that describes the model artifacts and the Docker image containing inference code), endpoint configuration (describing the hardware to deploy for hosting the model), and creates an ``Endpoint`` (launches the EC2 instances and deploys the model on them). In response, the API returns the endpoint name to which you can send requests for inferences. Args: job_name (str): Name of the training job to deploy the results of. initial_instance_count (int): Minimum number of EC2 instances to launch. The actual number of active instances for an endpoint at any given time varies due to autoscaling. instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction, for example, 'ml.c4.xlarge'. deployment_image (str): The Docker image which defines the inference code to be used as the entry point for accepting prediction requests. If not specified, uses the image used for the training job. name (str): Name of the ``Endpoint`` to create. If not specified, uses the training job name. role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs that create Amazon SageMaker endpoints use this role to access training data and model artifacts. You must grant sufficient permissions to this role. wait (bool): Whether to wait for the endpoint deployment to complete before returning (default: True). model_environment_vars (dict[str, str]): Environment variables to set on the model container (default: None). vpc_config_override (dict[str, list[str]]): Overrides VpcConfig set on the model. Default: use VpcConfig from training job. * 'Subnets' (list[str]): List of subnet ids. * 'SecurityGroupIds' (list[str]): List of security group ids. 
accelerator_type (str): Type of Elastic Inference accelerator to attach to the instance. For example, 'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html Returns: str: Name of the ``Endpoint`` that is created. ### Response: def endpoint_from_job(self, job_name, initial_instance_count, instance_type, deployment_image=None, name=None, role=None, wait=True, model_environment_vars=None, vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT, accelerator_type=None): """Create an ``Endpoint`` using the results of a successful training job. Specify the job name, Docker image containing the inference code, and hardware configuration to deploy the model. Internally the API, creates an Amazon SageMaker model (that describes the model artifacts and the Docker image containing inference code), endpoint configuration (describing the hardware to deploy for hosting the model), and creates an ``Endpoint`` (launches the EC2 instances and deploys the model on them). In response, the API returns the endpoint name to which you can send requests for inferences. Args: job_name (str): Name of the training job to deploy the results of. initial_instance_count (int): Minimum number of EC2 instances to launch. The actual number of active instances for an endpoint at any given time varies due to autoscaling. instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction, for example, 'ml.c4.xlarge'. deployment_image (str): The Docker image which defines the inference code to be used as the entry point for accepting prediction requests. If not specified, uses the image used for the training job. name (str): Name of the ``Endpoint`` to create. If not specified, uses the training job name. role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs that create Amazon SageMaker endpoints use this role to access training data and model artifacts. You must grant sufficient permissions to this role. 
wait (bool): Whether to wait for the endpoint deployment to complete before returning (default: True). model_environment_vars (dict[str, str]): Environment variables to set on the model container (default: None). vpc_config_override (dict[str, list[str]]): Overrides VpcConfig set on the model. Default: use VpcConfig from training job. * 'Subnets' (list[str]): List of subnet ids. * 'SecurityGroupIds' (list[str]): List of security group ids. accelerator_type (str): Type of Elastic Inference accelerator to attach to the instance. For example, 'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html Returns: str: Name of the ``Endpoint`` that is created. """ job_desc = self.sagemaker_client.describe_training_job(TrainingJobName=job_name) output_url = job_desc['ModelArtifacts']['S3ModelArtifacts'] deployment_image = deployment_image or job_desc['AlgorithmSpecification']['TrainingImage'] role = role or job_desc['RoleArn'] name = name or job_name vpc_config_override = _vpc_config_from_training_job(job_desc, vpc_config_override) return self.endpoint_from_model_data(model_s3_location=output_url, deployment_image=deployment_image, initial_instance_count=initial_instance_count, instance_type=instance_type, name=name, role=role, wait=wait, model_environment_vars=model_environment_vars, model_vpc_config=vpc_config_override, accelerator_type=accelerator_type)
def run(args): """Run a CWL preparation pipeline. """ dirs, config, run_info_yaml = run_info.prep_system(args.sample_config, args.systemconfig) integrations = args.integrations if hasattr(args, "integrations") else {} world = run_info.organize(dirs, config, run_info_yaml, is_cwl=True, integrations=integrations) create.from_world(world, run_info_yaml, integrations=integrations, add_container_tag=args.add_container_tag)
Run a CWL preparation pipeline.
Below is the instruction that describes the task: ### Input: Run a CWL preparation pipeline. ### Response: def run(args): """Run a CWL preparation pipeline. """ dirs, config, run_info_yaml = run_info.prep_system(args.sample_config, args.systemconfig) integrations = args.integrations if hasattr(args, "integrations") else {} world = run_info.organize(dirs, config, run_info_yaml, is_cwl=True, integrations=integrations) create.from_world(world, run_info_yaml, integrations=integrations, add_container_tag=args.add_container_tag)
def mavlink_to_json(msg): '''Translate mavlink python messages in json string''' ret = '\"%s\": {' % msg._type for fieldname in msg._fieldnames: data = getattr(msg, fieldname) ret += '\"%s\" : \"%s\", ' % (fieldname, data) ret = ret[0:-2] + '}' return ret
Translate mavlink python messages in json string
Below is the instruction that describes the task: ### Input: Translate mavlink python messages in json string ### Response: def mavlink_to_json(msg): '''Translate mavlink python messages in json string''' ret = '\"%s\": {' % msg._type for fieldname in msg._fieldnames: data = getattr(msg, fieldname) ret += '\"%s\" : \"%s\", ' % (fieldname, data) ret = ret[0:-2] + '}' return ret
def find_command(self, argv): """Given an argument list, find a command and return the processor and any remaining arguments. """ search_args = argv[:] name = '' while search_args: if search_args[0].startswith('-'): name = '%s %s' % (name, search_args[0]) raise ValueError('Invalid command %r' % name) next_val = search_args.pop(0) name = '%s %s' % (name, next_val) if name else next_val if name in self.commands: cmd_ep = self.commands[name] if hasattr(cmd_ep, 'resolve'): cmd_factory = cmd_ep.resolve() else: # NOTE(dhellmann): Some fake classes don't take # require as an argument. Yay? arg_spec = inspect.getargspec(cmd_ep.load) if 'require' in arg_spec[0]: cmd_factory = cmd_ep.load(require=False) else: cmd_factory = cmd_ep.load() return (cmd_factory, name, search_args) else: raise ValueError('Unknown command %r' % next(iter(argv), ''))
Given an argument list, find a command and return the processor and any remaining arguments.
Below is the instruction that describes the task: ### Input: Given an argument list, find a command and return the processor and any remaining arguments. ### Response: def find_command(self, argv): """Given an argument list, find a command and return the processor and any remaining arguments. """ search_args = argv[:] name = '' while search_args: if search_args[0].startswith('-'): name = '%s %s' % (name, search_args[0]) raise ValueError('Invalid command %r' % name) next_val = search_args.pop(0) name = '%s %s' % (name, next_val) if name else next_val if name in self.commands: cmd_ep = self.commands[name] if hasattr(cmd_ep, 'resolve'): cmd_factory = cmd_ep.resolve() else: # NOTE(dhellmann): Some fake classes don't take # require as an argument. Yay? arg_spec = inspect.getargspec(cmd_ep.load) if 'require' in arg_spec[0]: cmd_factory = cmd_ep.load(require=False) else: cmd_factory = cmd_ep.load() return (cmd_factory, name, search_args) else: raise ValueError('Unknown command %r' % next(iter(argv), ''))
def report(function, *args, **kwds): """Run a function, catch, report and discard exceptions""" try: function(*args, **kwds) except Exception: traceback.print_exc()
Run a function, catch, report and discard exceptions
Below is the instruction that describes the task: ### Input: Run a function, catch, report and discard exceptions ### Response: def report(function, *args, **kwds): """Run a function, catch, report and discard exceptions""" try: function(*args, **kwds) except Exception: traceback.print_exc()
def validate_ids(ctx, param, value): """Validate a list of IDs and convert them to a list.""" if not value: return None ids = [x.strip() for x in value.split(',')] for id_item in ids: if not id_item.isdigit(): raise click.BadParameter('Non-numeric value "{0}" provided for an ID.'.format(id_item)) return ids
Validate a list of IDs and convert them to a list.
Below is the instruction that describes the task: ### Input: Validate a list of IDs and convert them to a list. ### Response: def validate_ids(ctx, param, value): """Validate a list of IDs and convert them to a list.""" if not value: return None ids = [x.strip() for x in value.split(',')] for id_item in ids: if not id_item.isdigit(): raise click.BadParameter('Non-numeric value "{0}" provided for an ID.'.format(id_item)) return ids
def append_partition_by_name(self, db_name, tbl_name, part_name): """ Parameters: - db_name - tbl_name - part_name """ self.send_append_partition_by_name(db_name, tbl_name, part_name) return self.recv_append_partition_by_name()
Parameters: - db_name - tbl_name - part_name
Below is the instruction that describes the task: ### Input: Parameters: - db_name - tbl_name - part_name ### Response: def append_partition_by_name(self, db_name, tbl_name, part_name): """ Parameters: - db_name - tbl_name - part_name """ self.send_append_partition_by_name(db_name, tbl_name, part_name) return self.recv_append_partition_by_name()
def _resolved_type(self): """Return the type for the columns, and a flag to indicate that the column has codes.""" import datetime self.type_ratios = {test: (float(self.type_counts[test]) / float(self.count)) if self.count else None for test, testf in tests + [(None, None)]} # If it is more than 5% str, it's a str try: if self.type_ratios.get(text_type,0) + self.type_ratios.get(binary_type,0) > .05: if self.type_counts[text_type] > 0: return text_type, False elif self.type_counts[binary_type] > 0: return binary_type, False except TypeError as e: # This is probably the result of the type being unknown pass if self.type_counts[datetime.datetime] > 0: num_type = datetime.datetime elif self.type_counts[datetime.date] > 0: num_type = datetime.date elif self.type_counts[datetime.time] > 0: num_type = datetime.time elif self.type_counts[float] > 0: num_type = float elif self.type_counts[int] > 0: num_type = int elif self.type_counts[text_type] > 0: num_type = text_type elif self.type_counts[binary_type] > 0: num_type = binary_type else: num_type = unknown if self.type_counts[binary_type] > 0 and num_type != binary_type: has_codes = True else: has_codes = False return num_type, has_codes
Return the type for the columns, and a flag to indicate that the column has codes.
Below is the instruction that describes the task: ### Input: Return the type for the columns, and a flag to indicate that the column has codes. ### Response: def _resolved_type(self): """Return the type for the columns, and a flag to indicate that the column has codes.""" import datetime self.type_ratios = {test: (float(self.type_counts[test]) / float(self.count)) if self.count else None for test, testf in tests + [(None, None)]} # If it is more than 5% str, it's a str try: if self.type_ratios.get(text_type,0) + self.type_ratios.get(binary_type,0) > .05: if self.type_counts[text_type] > 0: return text_type, False elif self.type_counts[binary_type] > 0: return binary_type, False except TypeError as e: # This is probably the result of the type being unknown pass if self.type_counts[datetime.datetime] > 0: num_type = datetime.datetime elif self.type_counts[datetime.date] > 0: num_type = datetime.date elif self.type_counts[datetime.time] > 0: num_type = datetime.time elif self.type_counts[float] > 0: num_type = float elif self.type_counts[int] > 0: num_type = int elif self.type_counts[text_type] > 0: num_type = text_type elif self.type_counts[binary_type] > 0: num_type = binary_type else: num_type = unknown if self.type_counts[binary_type] > 0 and num_type != binary_type: has_codes = True else: has_codes = False return num_type, has_codes
def rpc_get_DID_record(self, did, **con_info): """ Given a DID, return the name or subdomain it corresponds to """ if not isinstance(did, (str,unicode)): return {'error': 'Invalid DID: not a string', 'http_status': 400} try: did_info = parse_DID(did) except: return {'error': 'Invalid DID', 'http_status': 400} res = None if did_info['name_type'] == 'name': res = self.get_name_DID_record(did) elif did_info['name_type'] == 'subdomain': res = self.get_subdomain_DID_record(did) if 'error' in res: return {'error': res['error'], 'http_status': res.get('http_status', 404)} return self.success_response({'record': res['record']})
Given a DID, return the name or subdomain it corresponds to
Below is the instruction that describes the task: ### Input: Given a DID, return the name or subdomain it corresponds to ### Response: def rpc_get_DID_record(self, did, **con_info): """ Given a DID, return the name or subdomain it corresponds to """ if not isinstance(did, (str,unicode)): return {'error': 'Invalid DID: not a string', 'http_status': 400} try: did_info = parse_DID(did) except: return {'error': 'Invalid DID', 'http_status': 400} res = None if did_info['name_type'] == 'name': res = self.get_name_DID_record(did) elif did_info['name_type'] == 'subdomain': res = self.get_subdomain_DID_record(did) if 'error' in res: return {'error': res['error'], 'http_status': res.get('http_status', 404)} return self.success_response({'record': res['record']})
def leave_room(self, sid, namespace, room): """Remove a client from a room.""" try: del self.rooms[namespace][room][sid] if len(self.rooms[namespace][room]) == 0: del self.rooms[namespace][room] if len(self.rooms[namespace]) == 0: del self.rooms[namespace] except KeyError: pass
Remove a client from a room.
Below is the instruction that describes the task: ### Input: Remove a client from a room. ### Response: def leave_room(self, sid, namespace, room): """Remove a client from a room.""" try: del self.rooms[namespace][room][sid] if len(self.rooms[namespace][room]) == 0: del self.rooms[namespace][room] if len(self.rooms[namespace]) == 0: del self.rooms[namespace] except KeyError: pass
def force_string(val=None): """Force a string representation of an object Args: val: object to parse into a string Returns: str: String representation """ if val is None: return '' if isinstance(val, list): newval = [str(x) for x in val] return ';'.join(newval) if isinstance(val, str): return val else: return str(val)
Force a string representation of an object Args: val: object to parse into a string Returns: str: String representation
Below is the instruction that describes the task: ### Input: Force a string representation of an object Args: val: object to parse into a string Returns: str: String representation ### Response: def force_string(val=None): """Force a string representation of an object Args: val: object to parse into a string Returns: str: String representation """ if val is None: return '' if isinstance(val, list): newval = [str(x) for x in val] return ';'.join(newval) if isinstance(val, str): return val else: return str(val)
def invoke(self, args, kwargs): """ Send the required soap message to invoke the specified method @param args: A list of args for the method invoked. @type args: list @param kwargs: Named (keyword) args for the method invoked. @type kwargs: dict @return: The result of the method invocation. @rtype: I{builtin} or I{subclass of} L{Object} """ simulation = kwargs[self.injkey] msg = simulation.get('msg') reply = simulation.get('reply') fault = simulation.get('fault') if msg is None: if reply is not None: return self.__reply(reply, args, kwargs) if fault is not None: return self.__fault(fault) raise Exception('(reply|fault) expected when msg=None') sax = Parser() msg = sax.parse(string=msg) return self.send(msg)
Send the required soap message to invoke the specified method @param args: A list of args for the method invoked. @type args: list @param kwargs: Named (keyword) args for the method invoked. @type kwargs: dict @return: The result of the method invocation. @rtype: I{builtin} or I{subclass of} L{Object}
Below is the instruction that describes the task: ### Input: Send the required soap message to invoke the specified method @param args: A list of args for the method invoked. @type args: list @param kwargs: Named (keyword) args for the method invoked. @type kwargs: dict @return: The result of the method invocation. @rtype: I{builtin} or I{subclass of} L{Object} ### Response: def invoke(self, args, kwargs): """ Send the required soap message to invoke the specified method @param args: A list of args for the method invoked. @type args: list @param kwargs: Named (keyword) args for the method invoked. @type kwargs: dict @return: The result of the method invocation. @rtype: I{builtin} or I{subclass of} L{Object} """ simulation = kwargs[self.injkey] msg = simulation.get('msg') reply = simulation.get('reply') fault = simulation.get('fault') if msg is None: if reply is not None: return self.__reply(reply, args, kwargs) if fault is not None: return self.__fault(fault) raise Exception('(reply|fault) expected when msg=None') sax = Parser() msg = sax.parse(string=msg) return self.send(msg)
def _got_features(self, features): """Process incoming <stream:features/> element. [initiating entity only] The received features node is available in `features`.""" self.features = features logger.debug("got features, passing to event handlers...") handled = self.event(GotFeaturesEvent(self.features)) logger.debug(" handled: {0}".format(handled)) if not handled: mandatory_handled = [] mandatory_not_handled = [] logger.debug(" passing to stream features handlers: {0}" .format(self._stream_feature_handlers)) for handler in self._stream_feature_handlers: ret = handler.handle_stream_features(self, self.features) if ret is None: continue elif isinstance(ret, StreamFeatureHandled): if ret.mandatory: mandatory_handled.append(unicode(ret)) break break elif isinstance(ret, StreamFeatureNotHandled): if ret.mandatory: mandatory_not_handled.append(unicode(ret)) break else: raise ValueError("Wrong value returned from a stream" " feature handler: {0!r}".format(ret)) if mandatory_not_handled and not mandatory_handled: self.send_stream_error("unsupported-feature") raise FatalStreamError( u"Unsupported mandatory-to-implement features: " + u" ".join(mandatory_not_handled))
Process incoming <stream:features/> element. [initiating entity only] The received features node is available in `features`.
Below is the the instruction that describes the task: ### Input: Process incoming <stream:features/> element. [initiating entity only] The received features node is available in `features`. ### Response: def _got_features(self, features): """Process incoming <stream:features/> element. [initiating entity only] The received features node is available in `features`.""" self.features = features logger.debug("got features, passing to event handlers...") handled = self.event(GotFeaturesEvent(self.features)) logger.debug(" handled: {0}".format(handled)) if not handled: mandatory_handled = [] mandatory_not_handled = [] logger.debug(" passing to stream features handlers: {0}" .format(self._stream_feature_handlers)) for handler in self._stream_feature_handlers: ret = handler.handle_stream_features(self, self.features) if ret is None: continue elif isinstance(ret, StreamFeatureHandled): if ret.mandatory: mandatory_handled.append(unicode(ret)) break break elif isinstance(ret, StreamFeatureNotHandled): if ret.mandatory: mandatory_not_handled.append(unicode(ret)) break else: raise ValueError("Wrong value returned from a stream" " feature handler: {0!r}".format(ret)) if mandatory_not_handled and not mandatory_handled: self.send_stream_error("unsupported-feature") raise FatalStreamError( u"Unsupported mandatory-to-implement features: " + u" ".join(mandatory_not_handled))
def __new_job_sync(self, message, future_results): """ Synchronous part of _new_job. Creates needed directories, copy files, and starts the container. """ course_id = message.course_id task_id = message.task_id debug = message.debug environment_name = message.environment enable_network = message.enable_network time_limit = message.time_limit hard_time_limit = message.hard_time_limit or time_limit * 3 mem_limit = message.mem_limit course_fs = self.tasks_fs.from_subfolder(course_id) task_fs = course_fs.from_subfolder(task_id) if not course_fs.exists() or not task_fs.exists(): self._logger.warning("Task %s/%s unavailable on this agent", course_id, task_id) raise CannotCreateJobException('Task unavailable on agent. Please retry later, the agents should synchronize soon. ' 'If the error persists, please contact your course administrator.') # Check for realistic memory limit value if mem_limit < 20: mem_limit = 20 elif mem_limit > self._max_memory_per_slot: self._logger.warning("Task %s/%s ask for too much memory (%dMB)! Available: %dMB", course_id, task_id, mem_limit, self._max_memory_per_slot) raise CannotCreateJobException('Not enough memory on agent (available: %dMB). Please contact your course administrator.' % self._max_memory_per_slot) if environment_name not in self._containers: self._logger.warning("Task %s/%s ask for an unknown environment %s (not in aliases)", course_id, task_id, environment_name) raise CannotCreateJobException('Unknown container. Please contact your course administrator.') environment = self._containers[environment_name]["id"] ports_needed = self._containers[environment_name]["ports"] if debug == "ssh" and 22 not in ports_needed: ports_needed.append(22) ports = {} if len(ports_needed) > 0: time_limit = 30 * 60 hard_time_limit = 30 * 60 for p in ports_needed: if len(self._external_ports) == 0: self._logger.warning("User asked for a port but no one are available") raise CannotCreateJobException('No ports are available right now. 
Please retry later.') ports[p] = self._external_ports.pop() # Create directories for storing all the data for the job try: container_path = tempfile.mkdtemp(dir=self._tmp_dir) except Exception as e: self._logger.error("Cannot make container temp directory! %s", str(e), exc_info=True) for p in ports: self._external_ports.add(ports[p]) raise CannotCreateJobException('Cannot make container temp directory.') task_path = path_join(container_path, 'task') # tmp_dir/id/task/ course_path = path_join(container_path, 'course') sockets_path = path_join(container_path, 'sockets') # tmp_dir/id/socket/ student_path = path_join(task_path, 'student') # tmp_dir/id/task/student/ systemfiles_path = path_join(task_path, 'systemfiles') # tmp_dir/id/task/systemfiles/ course_common_path = path_join(course_path, 'common') course_common_student_path = path_join(course_path, 'common', 'student') # Create the needed directories os.mkdir(sockets_path) os.chmod(container_path, 0o777) os.chmod(sockets_path, 0o777) os.mkdir(course_path) # TODO: avoid copy task_fs.copy_from(None, task_path) os.chmod(task_path, 0o777) if not os.path.exists(student_path): os.mkdir(student_path) os.chmod(student_path, 0o777) # Copy common and common/student if needed # TODO: avoid copy if course_fs.from_subfolder("$common").exists(): course_fs.from_subfolder("$common").copy_from(None, course_common_path) else: os.mkdir(course_common_path) if course_fs.from_subfolder("$common").from_subfolder("student").exists(): course_fs.from_subfolder("$common").from_subfolder("student").copy_from(None, course_common_student_path) else: os.mkdir(course_common_student_path) # Run the container try: container_id = self._docker.sync.create_container(environment, enable_network, mem_limit, task_path, sockets_path, course_common_path, course_common_student_path, ports) except Exception as e: self._logger.warning("Cannot create container! 
%s", str(e), exc_info=True) shutil.rmtree(container_path) for p in ports: self._external_ports.add(ports[p]) raise CannotCreateJobException('Cannot create container.') # Store info self._containers_running[container_id] = message, container_path, future_results self._container_for_job[message.job_id] = container_id self._student_containers_for_job[message.job_id] = set() if len(ports) != 0: self._assigned_external_ports[container_id] = list(ports.values()) try: # Start the container self._docker.sync.start_container(container_id) except Exception as e: self._logger.warning("Cannot start container! %s", str(e), exc_info=True) shutil.rmtree(container_path) for p in ports: self._external_ports.add(ports[p]) raise CannotCreateJobException('Cannot start container') return { "job_id": message.job_id, "container_id": container_id, "inputdata": message.inputdata, "debug": debug, "ports": ports, "orig_env": environment_name, "orig_memory_limit": mem_limit, "orig_time_limit": time_limit, "orig_hard_time_limit": hard_time_limit, "sockets_path": sockets_path, "student_path": student_path, "systemfiles_path": systemfiles_path, "course_common_student_path": course_common_student_path }
Synchronous part of _new_job. Creates needed directories, copy files, and starts the container.
Below is the the instruction that describes the task: ### Input: Synchronous part of _new_job. Creates needed directories, copy files, and starts the container. ### Response: def __new_job_sync(self, message, future_results): """ Synchronous part of _new_job. Creates needed directories, copy files, and starts the container. """ course_id = message.course_id task_id = message.task_id debug = message.debug environment_name = message.environment enable_network = message.enable_network time_limit = message.time_limit hard_time_limit = message.hard_time_limit or time_limit * 3 mem_limit = message.mem_limit course_fs = self.tasks_fs.from_subfolder(course_id) task_fs = course_fs.from_subfolder(task_id) if not course_fs.exists() or not task_fs.exists(): self._logger.warning("Task %s/%s unavailable on this agent", course_id, task_id) raise CannotCreateJobException('Task unavailable on agent. Please retry later, the agents should synchronize soon. ' 'If the error persists, please contact your course administrator.') # Check for realistic memory limit value if mem_limit < 20: mem_limit = 20 elif mem_limit > self._max_memory_per_slot: self._logger.warning("Task %s/%s ask for too much memory (%dMB)! Available: %dMB", course_id, task_id, mem_limit, self._max_memory_per_slot) raise CannotCreateJobException('Not enough memory on agent (available: %dMB). Please contact your course administrator.' % self._max_memory_per_slot) if environment_name not in self._containers: self._logger.warning("Task %s/%s ask for an unknown environment %s (not in aliases)", course_id, task_id, environment_name) raise CannotCreateJobException('Unknown container. 
Please contact your course administrator.') environment = self._containers[environment_name]["id"] ports_needed = self._containers[environment_name]["ports"] if debug == "ssh" and 22 not in ports_needed: ports_needed.append(22) ports = {} if len(ports_needed) > 0: time_limit = 30 * 60 hard_time_limit = 30 * 60 for p in ports_needed: if len(self._external_ports) == 0: self._logger.warning("User asked for a port but no one are available") raise CannotCreateJobException('No ports are available right now. Please retry later.') ports[p] = self._external_ports.pop() # Create directories for storing all the data for the job try: container_path = tempfile.mkdtemp(dir=self._tmp_dir) except Exception as e: self._logger.error("Cannot make container temp directory! %s", str(e), exc_info=True) for p in ports: self._external_ports.add(ports[p]) raise CannotCreateJobException('Cannot make container temp directory.') task_path = path_join(container_path, 'task') # tmp_dir/id/task/ course_path = path_join(container_path, 'course') sockets_path = path_join(container_path, 'sockets') # tmp_dir/id/socket/ student_path = path_join(task_path, 'student') # tmp_dir/id/task/student/ systemfiles_path = path_join(task_path, 'systemfiles') # tmp_dir/id/task/systemfiles/ course_common_path = path_join(course_path, 'common') course_common_student_path = path_join(course_path, 'common', 'student') # Create the needed directories os.mkdir(sockets_path) os.chmod(container_path, 0o777) os.chmod(sockets_path, 0o777) os.mkdir(course_path) # TODO: avoid copy task_fs.copy_from(None, task_path) os.chmod(task_path, 0o777) if not os.path.exists(student_path): os.mkdir(student_path) os.chmod(student_path, 0o777) # Copy common and common/student if needed # TODO: avoid copy if course_fs.from_subfolder("$common").exists(): course_fs.from_subfolder("$common").copy_from(None, course_common_path) else: os.mkdir(course_common_path) if course_fs.from_subfolder("$common").from_subfolder("student").exists(): 
course_fs.from_subfolder("$common").from_subfolder("student").copy_from(None, course_common_student_path) else: os.mkdir(course_common_student_path) # Run the container try: container_id = self._docker.sync.create_container(environment, enable_network, mem_limit, task_path, sockets_path, course_common_path, course_common_student_path, ports) except Exception as e: self._logger.warning("Cannot create container! %s", str(e), exc_info=True) shutil.rmtree(container_path) for p in ports: self._external_ports.add(ports[p]) raise CannotCreateJobException('Cannot create container.') # Store info self._containers_running[container_id] = message, container_path, future_results self._container_for_job[message.job_id] = container_id self._student_containers_for_job[message.job_id] = set() if len(ports) != 0: self._assigned_external_ports[container_id] = list(ports.values()) try: # Start the container self._docker.sync.start_container(container_id) except Exception as e: self._logger.warning("Cannot start container! %s", str(e), exc_info=True) shutil.rmtree(container_path) for p in ports: self._external_ports.add(ports[p]) raise CannotCreateJobException('Cannot start container') return { "job_id": message.job_id, "container_id": container_id, "inputdata": message.inputdata, "debug": debug, "ports": ports, "orig_env": environment_name, "orig_memory_limit": mem_limit, "orig_time_limit": time_limit, "orig_hard_time_limit": hard_time_limit, "sockets_path": sockets_path, "student_path": student_path, "systemfiles_path": systemfiles_path, "course_common_student_path": course_common_student_path }
def DOM_node_to_XML(tree, xml_declaration=True): """ Prints a DOM tree to its Unicode representation. :param tree: the input DOM tree :type tree: an ``xml.etree.ElementTree.Element`` object :param xml_declaration: if ``True`` (default) prints a leading XML declaration line :type xml_declaration: bool :returns: Unicode object """ result = ET.tostring(tree, encoding='utf8', method='xml').decode('utf-8') if not xml_declaration: result = result.split("<?xml version='1.0' encoding='utf8'?>\n")[1] return result
Prints a DOM tree to its Unicode representation. :param tree: the input DOM tree :type tree: an ``xml.etree.ElementTree.Element`` object :param xml_declaration: if ``True`` (default) prints a leading XML declaration line :type xml_declaration: bool :returns: Unicode object
Below is the instruction that describes the task: ### Input: Prints a DOM tree to its Unicode representation. :param tree: the input DOM tree :type tree: an ``xml.etree.ElementTree.Element`` object :param xml_declaration: if ``True`` (default) prints a leading XML declaration line :type xml_declaration: bool :returns: Unicode object ### Response: def DOM_node_to_XML(tree, xml_declaration=True): """ Prints a DOM tree to its Unicode representation. :param tree: the input DOM tree :type tree: an ``xml.etree.ElementTree.Element`` object :param xml_declaration: if ``True`` (default) prints a leading XML declaration line :type xml_declaration: bool :returns: Unicode object """ result = ET.tostring(tree, encoding='utf8', method='xml').decode('utf-8') if not xml_declaration: result = result.split("<?xml version='1.0' encoding='utf8'?>\n")[1] return result
def _hat_integral(self, x): """Integral of the `hat` function, used for sampling. We choose a `hat` function, h(x) = x^(-power), which is a continuous (unnormalized) density touching each positive integer at the (unnormalized) pmf. This function implements `hat` integral: H(x) = int_x^inf h(t) dt; which is needed for sampling purposes. Arguments: x: A Tensor of points x at which to evaluate H(x). Returns: A Tensor containing evaluation H(x) at x. """ x = tf.cast(x, self.power.dtype) t = self.power - 1. return tf.exp((-t) * tf.math.log1p(x) - tf.math.log(t))
Integral of the `hat` function, used for sampling. We choose a `hat` function, h(x) = x^(-power), which is a continuous (unnormalized) density touching each positive integer at the (unnormalized) pmf. This function implements `hat` integral: H(x) = int_x^inf h(t) dt; which is needed for sampling purposes. Arguments: x: A Tensor of points x at which to evaluate H(x). Returns: A Tensor containing evaluation H(x) at x.
Below is the instruction that describes the task: ### Input: Integral of the `hat` function, used for sampling. We choose a `hat` function, h(x) = x^(-power), which is a continuous (unnormalized) density touching each positive integer at the (unnormalized) pmf. This function implements `hat` integral: H(x) = int_x^inf h(t) dt; which is needed for sampling purposes. Arguments: x: A Tensor of points x at which to evaluate H(x). Returns: A Tensor containing evaluation H(x) at x. ### Response: def _hat_integral(self, x): """Integral of the `hat` function, used for sampling. We choose a `hat` function, h(x) = x^(-power), which is a continuous (unnormalized) density touching each positive integer at the (unnormalized) pmf. This function implements `hat` integral: H(x) = int_x^inf h(t) dt; which is needed for sampling purposes. Arguments: x: A Tensor of points x at which to evaluate H(x). Returns: A Tensor containing evaluation H(x) at x. """ x = tf.cast(x, self.power.dtype) t = self.power - 1. return tf.exp((-t) * tf.math.log1p(x) - tf.math.log(t))
def get_asset_temporal_assignment_session_for_repository(self, repository_id, proxy): """Gets the session for assigning temporal coverage of an asset for the given repository. arg: repository_id (osid.id.Id): the Id of the repository arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetTemporalAssignmentSession) - an AssetTemporalAssignmentSession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_temporal_assignment() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_asset_temporal_assignment() and supports_visible_federation() are true. """ if not repository_id: raise NullArgument() if not self.supports_asset_temporal_assignment(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed('import error') proxy = self._convert_proxy(proxy) try: session = sessions.AssetTemporalAssignmentSession(repository_id, proxy, runtime=self._runtime) except AttributeError: raise OperationFailed('attribute error') return session
Gets the session for assigning temporal coverage of an asset for the given repository. arg: repository_id (osid.id.Id): the Id of the repository arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetTemporalAssignmentSession) - an AssetTemporalAssignmentSession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_temporal_assignment() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_asset_temporal_assignment() and supports_visible_federation() are true.
Below is the the instruction that describes the task: ### Input: Gets the session for assigning temporal coverage of an asset for the given repository. arg: repository_id (osid.id.Id): the Id of the repository arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetTemporalAssignmentSession) - an AssetTemporalAssignmentSession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_temporal_assignment() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_asset_temporal_assignment() and supports_visible_federation() are true. ### Response: def get_asset_temporal_assignment_session_for_repository(self, repository_id, proxy): """Gets the session for assigning temporal coverage of an asset for the given repository. arg: repository_id (osid.id.Id): the Id of the repository arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetTemporalAssignmentSession) - an AssetTemporalAssignmentSession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_temporal_assignment() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_asset_temporal_assignment() and supports_visible_federation() are true. """ if not repository_id: raise NullArgument() if not self.supports_asset_temporal_assignment(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed('import error') proxy = self._convert_proxy(proxy) try: session = sessions.AssetTemporalAssignmentSession(repository_id, proxy, runtime=self._runtime) except AttributeError: raise OperationFailed('attribute error') return session
def modify_macho_file_headers(macho_file_path, modificator_func): """ Modifies headers of a Mach-O file at the given path by calling the modificator function on each header. Returns True on success, otherwise rises an exeption (e.g. from macholib) """ if not os.path.isfile(macho_file_path): raise Exception("You must specify a real executable path as a target") return False m = MachO(macho_file_path) apply_to_headers(m, modificator_func) save_macho(m, macho_file_path) return True
Modifies headers of a Mach-O file at the given path by calling the modificator function on each header. Returns True on success, otherwise rises an exeption (e.g. from macholib)
Below is the the instruction that describes the task: ### Input: Modifies headers of a Mach-O file at the given path by calling the modificator function on each header. Returns True on success, otherwise rises an exeption (e.g. from macholib) ### Response: def modify_macho_file_headers(macho_file_path, modificator_func): """ Modifies headers of a Mach-O file at the given path by calling the modificator function on each header. Returns True on success, otherwise rises an exeption (e.g. from macholib) """ if not os.path.isfile(macho_file_path): raise Exception("You must specify a real executable path as a target") return False m = MachO(macho_file_path) apply_to_headers(m, modificator_func) save_macho(m, macho_file_path) return True
def commit_hash(self): """Return the current commit hash if available. This is not a required task so best effort is fine. In other words this is not guaranteed to work 100% of the time. """ commit_hash = None branch = None branch_file = '.git/HEAD' # ref: refs/heads/develop # get current branch if os.path.isfile(branch_file): with open(branch_file, 'r') as f: try: branch = f.read().strip().split('/')[2] except IndexError: pass # get commit hash if branch: hash_file = '.git/refs/heads/{}'.format(branch) if os.path.isfile(hash_file): with open(hash_file, 'r') as f: commit_hash = f.read().strip() return commit_hash
Return the current commit hash if available. This is not a required task so best effort is fine. In other words this is not guaranteed to work 100% of the time.
Below is the the instruction that describes the task: ### Input: Return the current commit hash if available. This is not a required task so best effort is fine. In other words this is not guaranteed to work 100% of the time. ### Response: def commit_hash(self): """Return the current commit hash if available. This is not a required task so best effort is fine. In other words this is not guaranteed to work 100% of the time. """ commit_hash = None branch = None branch_file = '.git/HEAD' # ref: refs/heads/develop # get current branch if os.path.isfile(branch_file): with open(branch_file, 'r') as f: try: branch = f.read().strip().split('/')[2] except IndexError: pass # get commit hash if branch: hash_file = '.git/refs/heads/{}'.format(branch) if os.path.isfile(hash_file): with open(hash_file, 'r') as f: commit_hash = f.read().strip() return commit_hash
def _eval(self): "Evaluates a individual using recursion and self._pos as pointer" pos = self._pos self._pos += 1 node = self._ind[pos] if isinstance(node, Function): args = [self._eval() for x in range(node.nargs)] node.eval(args) for x in args: x.hy = None x.hy_test = None else: node.eval(self._X) return node
Evaluates a individual using recursion and self._pos as pointer
Below is the the instruction that describes the task: ### Input: Evaluates a individual using recursion and self._pos as pointer ### Response: def _eval(self): "Evaluates a individual using recursion and self._pos as pointer" pos = self._pos self._pos += 1 node = self._ind[pos] if isinstance(node, Function): args = [self._eval() for x in range(node.nargs)] node.eval(args) for x in args: x.hy = None x.hy_test = None else: node.eval(self._X) return node
def fit(self, X=None, y=None, **kwargs): """Fit the blocks of this pipeline. Sequentially call the `fit` and the `produce` methods of each block, capturing the outputs each `produce` method before calling the `fit` method of the next one. During the whole process a context dictionary is built, where both the passed arguments and the captured outputs of the `produce` methods are stored, and from which the arguments for the next `fit` and `produce` calls will be taken. Args: X: Fit Data, which the pipeline will learn from. y: Fit Data labels, which the pipeline will use to learn how to behave. **kwargs: Any additional keyword arguments will be directly added to the context dictionary and available for the blocks. """ context = { 'X': X, 'y': y } context.update(kwargs) last_block_name = list(self.blocks.keys())[-1] for block_name, block in self.blocks.items(): LOGGER.debug("Fitting block %s", block_name) try: fit_args = self._get_block_args(block_name, block.fit_args, context) block.fit(**fit_args) except Exception: LOGGER.exception("Exception caught fitting MLBlock %s", block_name) raise if block_name != last_block_name: LOGGER.debug("Producing block %s", block_name) try: produce_args = self._get_block_args(block_name, block.produce_args, context) outputs = block.produce(**produce_args) output_dict = self._get_outputs(block_name, outputs, block.produce_output) context.update(output_dict) except Exception: LOGGER.exception("Exception caught producing MLBlock %s", block_name) raise
Fit the blocks of this pipeline. Sequentially call the `fit` and the `produce` methods of each block, capturing the outputs each `produce` method before calling the `fit` method of the next one. During the whole process a context dictionary is built, where both the passed arguments and the captured outputs of the `produce` methods are stored, and from which the arguments for the next `fit` and `produce` calls will be taken. Args: X: Fit Data, which the pipeline will learn from. y: Fit Data labels, which the pipeline will use to learn how to behave. **kwargs: Any additional keyword arguments will be directly added to the context dictionary and available for the blocks.
Below is the the instruction that describes the task: ### Input: Fit the blocks of this pipeline. Sequentially call the `fit` and the `produce` methods of each block, capturing the outputs each `produce` method before calling the `fit` method of the next one. During the whole process a context dictionary is built, where both the passed arguments and the captured outputs of the `produce` methods are stored, and from which the arguments for the next `fit` and `produce` calls will be taken. Args: X: Fit Data, which the pipeline will learn from. y: Fit Data labels, which the pipeline will use to learn how to behave. **kwargs: Any additional keyword arguments will be directly added to the context dictionary and available for the blocks. ### Response: def fit(self, X=None, y=None, **kwargs): """Fit the blocks of this pipeline. Sequentially call the `fit` and the `produce` methods of each block, capturing the outputs each `produce` method before calling the `fit` method of the next one. During the whole process a context dictionary is built, where both the passed arguments and the captured outputs of the `produce` methods are stored, and from which the arguments for the next `fit` and `produce` calls will be taken. Args: X: Fit Data, which the pipeline will learn from. y: Fit Data labels, which the pipeline will use to learn how to behave. **kwargs: Any additional keyword arguments will be directly added to the context dictionary and available for the blocks. 
""" context = { 'X': X, 'y': y } context.update(kwargs) last_block_name = list(self.blocks.keys())[-1] for block_name, block in self.blocks.items(): LOGGER.debug("Fitting block %s", block_name) try: fit_args = self._get_block_args(block_name, block.fit_args, context) block.fit(**fit_args) except Exception: LOGGER.exception("Exception caught fitting MLBlock %s", block_name) raise if block_name != last_block_name: LOGGER.debug("Producing block %s", block_name) try: produce_args = self._get_block_args(block_name, block.produce_args, context) outputs = block.produce(**produce_args) output_dict = self._get_outputs(block_name, outputs, block.produce_output) context.update(output_dict) except Exception: LOGGER.exception("Exception caught producing MLBlock %s", block_name) raise
def independent_interdomain_conditional(Kmn, Kmm, Knn, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False): """ The inducing outputs live in the g-space (R^L). Interdomain conditional calculation. :param Kmn: M x L x N x P :param Kmm: L x M x M :param Knn: N x P or N x N or P x N x N or N x P x N x P :param f: data matrix, M x L :param q_sqrt: L x M x M or M x L :param full_cov: calculate covariance between inputs :param full_output_cov: calculate covariance between outputs :param white: use whitened representation :return: - mean: N x P - variance: N x P, N x P x P, P x N x N, N x P x N x P """ logger.debug("independent_interdomain_conditional") M, L, N, P = [tf.shape(Kmn)[i] for i in range(Kmn.shape.ndims)] Lm = tf.cholesky(Kmm) # L x M x M # Compute the projection matrix A Kmn = tf.reshape(tf.transpose(Kmn, (1, 0, 2, 3)), (L, M, N * P)) A = tf.matrix_triangular_solve(Lm, Kmn, lower=True) # L x M x M * L x M x NP -> L x M x NP Ar = tf.reshape(A, (L, M, N, P)) # compute the covariance due to the conditioning if full_cov and full_output_cov: fvar = Knn - tf.tensordot(Ar, Ar, [[0, 1], [0, 1]]) # N x P x N x P elif full_cov and not full_output_cov: At = tf.reshape(tf.transpose(Ar), (P, N, M * L)) # P x N x ML fvar = Knn - tf.matmul(At, At, transpose_b=True) # P x N x N elif not full_cov and full_output_cov: At = tf.reshape(tf.transpose(Ar, [2, 3, 1, 0]), (N, P, M * L)) # N x P x ML fvar = Knn - tf.matmul(At, At, transpose_b=True) # N x P x P elif not full_cov and not full_output_cov: fvar = Knn - tf.reshape(tf.reduce_sum(tf.square(A), [0, 1]), (N, P)) # Knn: N x P # another backsubstitution in the unwhitened case if not white: A = tf.matrix_triangular_solve(Lm, Ar) # L x M x M * L x M x NP -> L x M x NP Ar = tf.reshape(A, (L, M, N, P)) fmean = tf.tensordot(Ar, f, [[1, 0], [0, 1]]) # N x P if q_sqrt is not None: if q_sqrt.shape.ndims == 3: Lf = tf.matrix_band_part(q_sqrt, -1, 0) # L x M x M LTA = tf.matmul(Lf, A, transpose_a=True) # L x M x M * L x 
M x NP -> L x M x NP else: # q_sqrt M x L LTA = (A * tf.transpose(q_sqrt)[..., None]) # L x M x NP if full_cov and full_output_cov: LTAr = tf.reshape(LTA, (L * M, N * P)) fvar = fvar + tf.reshape(tf.matmul(LTAr, LTAr, transpose_a=True), (N, P, N, P)) elif full_cov and not full_output_cov: LTAr = tf.transpose(tf.reshape(LTA, (L * M, N, P)), [2, 0, 1]) # P x LM x N fvar = fvar + tf.matmul(LTAr, LTAr, transpose_a=True) # P x N x N elif not full_cov and full_output_cov: LTAr = tf.transpose(tf.reshape(LTA, (L * M, N, P)), [1, 0, 2]) # N x LM x P fvar = fvar + tf.matmul(LTAr, LTAr, transpose_a=True) # N x P x P elif not full_cov and not full_output_cov: fvar = fvar + tf.reshape(tf.reduce_sum(tf.square(LTA), (0, 1)), (N, P)) return fmean, fvar
The inducing outputs live in the g-space (R^L). Interdomain conditional calculation. :param Kmn: M x L x N x P :param Kmm: L x M x M :param Knn: N x P or N x N or P x N x N or N x P x N x P :param f: data matrix, M x L :param q_sqrt: L x M x M or M x L :param full_cov: calculate covariance between inputs :param full_output_cov: calculate covariance between outputs :param white: use whitened representation :return: - mean: N x P - variance: N x P, N x P x P, P x N x N, N x P x N x P
Below is the the instruction that describes the task: ### Input: The inducing outputs live in the g-space (R^L). Interdomain conditional calculation. :param Kmn: M x L x N x P :param Kmm: L x M x M :param Knn: N x P or N x N or P x N x N or N x P x N x P :param f: data matrix, M x L :param q_sqrt: L x M x M or M x L :param full_cov: calculate covariance between inputs :param full_output_cov: calculate covariance between outputs :param white: use whitened representation :return: - mean: N x P - variance: N x P, N x P x P, P x N x N, N x P x N x P ### Response: def independent_interdomain_conditional(Kmn, Kmm, Knn, f, *, full_cov=False, full_output_cov=False, q_sqrt=None, white=False): """ The inducing outputs live in the g-space (R^L). Interdomain conditional calculation. :param Kmn: M x L x N x P :param Kmm: L x M x M :param Knn: N x P or N x N or P x N x N or N x P x N x P :param f: data matrix, M x L :param q_sqrt: L x M x M or M x L :param full_cov: calculate covariance between inputs :param full_output_cov: calculate covariance between outputs :param white: use whitened representation :return: - mean: N x P - variance: N x P, N x P x P, P x N x N, N x P x N x P """ logger.debug("independent_interdomain_conditional") M, L, N, P = [tf.shape(Kmn)[i] for i in range(Kmn.shape.ndims)] Lm = tf.cholesky(Kmm) # L x M x M # Compute the projection matrix A Kmn = tf.reshape(tf.transpose(Kmn, (1, 0, 2, 3)), (L, M, N * P)) A = tf.matrix_triangular_solve(Lm, Kmn, lower=True) # L x M x M * L x M x NP -> L x M x NP Ar = tf.reshape(A, (L, M, N, P)) # compute the covariance due to the conditioning if full_cov and full_output_cov: fvar = Knn - tf.tensordot(Ar, Ar, [[0, 1], [0, 1]]) # N x P x N x P elif full_cov and not full_output_cov: At = tf.reshape(tf.transpose(Ar), (P, N, M * L)) # P x N x ML fvar = Knn - tf.matmul(At, At, transpose_b=True) # P x N x N elif not full_cov and full_output_cov: At = tf.reshape(tf.transpose(Ar, [2, 3, 1, 0]), (N, P, M * L)) # N x P x ML fvar = Knn 
- tf.matmul(At, At, transpose_b=True) # N x P x P elif not full_cov and not full_output_cov: fvar = Knn - tf.reshape(tf.reduce_sum(tf.square(A), [0, 1]), (N, P)) # Knn: N x P # another backsubstitution in the unwhitened case if not white: A = tf.matrix_triangular_solve(Lm, Ar) # L x M x M * L x M x NP -> L x M x NP Ar = tf.reshape(A, (L, M, N, P)) fmean = tf.tensordot(Ar, f, [[1, 0], [0, 1]]) # N x P if q_sqrt is not None: if q_sqrt.shape.ndims == 3: Lf = tf.matrix_band_part(q_sqrt, -1, 0) # L x M x M LTA = tf.matmul(Lf, A, transpose_a=True) # L x M x M * L x M x NP -> L x M x NP else: # q_sqrt M x L LTA = (A * tf.transpose(q_sqrt)[..., None]) # L x M x NP if full_cov and full_output_cov: LTAr = tf.reshape(LTA, (L * M, N * P)) fvar = fvar + tf.reshape(tf.matmul(LTAr, LTAr, transpose_a=True), (N, P, N, P)) elif full_cov and not full_output_cov: LTAr = tf.transpose(tf.reshape(LTA, (L * M, N, P)), [2, 0, 1]) # P x LM x N fvar = fvar + tf.matmul(LTAr, LTAr, transpose_a=True) # P x N x N elif not full_cov and full_output_cov: LTAr = tf.transpose(tf.reshape(LTA, (L * M, N, P)), [1, 0, 2]) # N x LM x P fvar = fvar + tf.matmul(LTAr, LTAr, transpose_a=True) # N x P x P elif not full_cov and not full_output_cov: fvar = fvar + tf.reshape(tf.reduce_sum(tf.square(LTA), (0, 1)), (N, P)) return fmean, fvar
def bool_2_indices(a): """ Convert boolean array into a 2D array of (start, stop) pairs. """ if any(a): lims = [] lims.append(np.where(a[:-1] != a[1:])[0]) if a[0]: lims.append([0]) if a[-1]: lims.append([len(a) - 1]) lims = np.concatenate(lims) lims.sort() return np.reshape(lims, (lims.size // 2, 2)) else: return None
Convert boolean array into a 2D array of (start, stop) pairs.
Below is the the instruction that describes the task: ### Input: Convert boolean array into a 2D array of (start, stop) pairs. ### Response: def bool_2_indices(a): """ Convert boolean array into a 2D array of (start, stop) pairs. """ if any(a): lims = [] lims.append(np.where(a[:-1] != a[1:])[0]) if a[0]: lims.append([0]) if a[-1]: lims.append([len(a) - 1]) lims = np.concatenate(lims) lims.sort() return np.reshape(lims, (lims.size // 2, 2)) else: return None
def get_process_tag(program, ccd, version='p'): """ make a process tag have a suffix indicating which ccd its for. @param program: Name of the process that a tag is built for. @param ccd: the CCD number that this process ran on. @param version: The version of the exposure (s, p, o) that the process ran on. @return: The string that represents the processing tag. """ return "%s_%s%s" % (program, str(version), str(ccd).zfill(2))
make a process tag have a suffix indicating which ccd its for. @param program: Name of the process that a tag is built for. @param ccd: the CCD number that this process ran on. @param version: The version of the exposure (s, p, o) that the process ran on. @return: The string that represents the processing tag.
Below is the the instruction that describes the task: ### Input: make a process tag have a suffix indicating which ccd its for. @param program: Name of the process that a tag is built for. @param ccd: the CCD number that this process ran on. @param version: The version of the exposure (s, p, o) that the process ran on. @return: The string that represents the processing tag. ### Response: def get_process_tag(program, ccd, version='p'): """ make a process tag have a suffix indicating which ccd its for. @param program: Name of the process that a tag is built for. @param ccd: the CCD number that this process ran on. @param version: The version of the exposure (s, p, o) that the process ran on. @return: The string that represents the processing tag. """ return "%s_%s%s" % (program, str(version), str(ccd).zfill(2))
def __find_child_classes(self, file): """ Return a list of all <__base_class> based classes found in <file> """ child_classes = [] folder, name = os.path.split(file) name = os.path.splitext(name)[0] import imp module = imp.load_source(name, file) def filter_classes(m): if inspect.isclass(m): if inspect.getmro(m)[1] == self.__base_class: return True return False for name, obj in inspect.getmembers(module, filter_classes): child_classes.append(obj) return child_classes
Return a list of all <__base_class> based classes found in <file>
Below is the the instruction that describes the task: ### Input: Return a list of all <__base_class> based classes found in <file> ### Response: def __find_child_classes(self, file): """ Return a list of all <__base_class> based classes found in <file> """ child_classes = [] folder, name = os.path.split(file) name = os.path.splitext(name)[0] import imp module = imp.load_source(name, file) def filter_classes(m): if inspect.isclass(m): if inspect.getmro(m)[1] == self.__base_class: return True return False for name, obj in inspect.getmembers(module, filter_classes): child_classes.append(obj) return child_classes
def print(self, *args, end='\n', file=None): """Convenience function so you don't need to remember to put the \n at the end of the line. """ if file is None: file = self.stdout s = ' '.join(str(arg) for arg in args) + end file.write(s)
Convenience function so you don't need to remember to put the \n at the end of the line.
Below is the the instruction that describes the task: ### Input: Convenience function so you don't need to remember to put the \n at the end of the line. ### Response: def print(self, *args, end='\n', file=None): """Convenience function so you don't need to remember to put the \n at the end of the line. """ if file is None: file = self.stdout s = ' '.join(str(arg) for arg in args) + end file.write(s)
def dim_reduce_data(data, d): """ Does a MDS on the data directly, not on the means. Args: data (array): genes x cells d (int): desired dimensionality Returns: X, a cells x d matrix """ genes, cells = data.shape distances = np.zeros((cells, cells)) for i in range(cells): for j in range(cells): distances[i,j] = poisson_dist(data[:,i], data[:,j]) # do MDS on the distance matrix (procedure from Wikipedia) proximity = distances**2 J = np.eye(cells) - 1./cells B = -0.5*np.dot(J, np.dot(proximity, J)) # B should be symmetric, so we can use eigh e_val, e_vec = np.linalg.eigh(B) # Note: lam should be ordered to be the largest eigenvalues lam = np.diag(e_val[-d:])[::-1] #lam = max_or_zero(lam) E = e_vec[:,-d:][::-1] X = np.dot(E, lam**0.5) return X
Does a MDS on the data directly, not on the means. Args: data (array): genes x cells d (int): desired dimensionality Returns: X, a cells x d matrix
Below is the the instruction that describes the task: ### Input: Does a MDS on the data directly, not on the means. Args: data (array): genes x cells d (int): desired dimensionality Returns: X, a cells x d matrix ### Response: def dim_reduce_data(data, d): """ Does a MDS on the data directly, not on the means. Args: data (array): genes x cells d (int): desired dimensionality Returns: X, a cells x d matrix """ genes, cells = data.shape distances = np.zeros((cells, cells)) for i in range(cells): for j in range(cells): distances[i,j] = poisson_dist(data[:,i], data[:,j]) # do MDS on the distance matrix (procedure from Wikipedia) proximity = distances**2 J = np.eye(cells) - 1./cells B = -0.5*np.dot(J, np.dot(proximity, J)) # B should be symmetric, so we can use eigh e_val, e_vec = np.linalg.eigh(B) # Note: lam should be ordered to be the largest eigenvalues lam = np.diag(e_val[-d:])[::-1] #lam = max_or_zero(lam) E = e_vec[:,-d:][::-1] X = np.dot(E, lam**0.5) return X
def compose(f, g): """Chain functions""" fun = lambda *args, **kwargs: f(g(*args, **kwargs)) fun.__name__ = "%s o %s" % (f.__name__, g.__name__) return fun
Chain functions
Below is the the instruction that describes the task: ### Input: Chain functions ### Response: def compose(f, g): """Chain functions""" fun = lambda *args, **kwargs: f(g(*args, **kwargs)) fun.__name__ = "%s o %s" % (f.__name__, g.__name__) return fun
def authenticate(self, request): """ Authenticate the request and return a two-tuple of (user, token). """ auth = get_authorization_header(request).split() if not auth or auth[0].lower() != b'token': return None if len(auth) == 1: msg = _('Invalid auth token header. No credentials provided.') raise AuthenticationFailed(msg) elif len(auth) > 2: msg = _('Invalid auth token.') raise AuthenticationFailed(msg) try: token = urlsafe_b64decode(auth[1]) except ValueError: msg = _('Invalid auth token.') raise AuthenticationFailed(msg) return self.authenticate_credentials(token, request)
Authenticate the request and return a two-tuple of (user, token).
Below is the the instruction that describes the task: ### Input: Authenticate the request and return a two-tuple of (user, token). ### Response: def authenticate(self, request): """ Authenticate the request and return a two-tuple of (user, token). """ auth = get_authorization_header(request).split() if not auth or auth[0].lower() != b'token': return None if len(auth) == 1: msg = _('Invalid auth token header. No credentials provided.') raise AuthenticationFailed(msg) elif len(auth) > 2: msg = _('Invalid auth token.') raise AuthenticationFailed(msg) try: token = urlsafe_b64decode(auth[1]) except ValueError: msg = _('Invalid auth token.') raise AuthenticationFailed(msg) return self.authenticate_credentials(token, request)
def update(self, num): """Update metrics with the new number.""" num = float(num) self.count += 1 self.low = min(self.low, num) self.high = max(self.high, num) # Welford's online mean and variance algorithm. delta = num - self.mean self.mean = self.mean + delta / self.count delta2 = num - self.mean self._rolling_variance = self._rolling_variance + delta * delta2 if self.count > 1: self.deviation = math.sqrt(self._rolling_variance / (self.count - 1)) else: self.deviation = 0.0
Update metrics with the new number.
Below is the the instruction that describes the task: ### Input: Update metrics with the new number. ### Response: def update(self, num): """Update metrics with the new number.""" num = float(num) self.count += 1 self.low = min(self.low, num) self.high = max(self.high, num) # Welford's online mean and variance algorithm. delta = num - self.mean self.mean = self.mean + delta / self.count delta2 = num - self.mean self._rolling_variance = self._rolling_variance + delta * delta2 if self.count > 1: self.deviation = math.sqrt(self._rolling_variance / (self.count - 1)) else: self.deviation = 0.0
def compute_dominators(nodes): ''' Naive implementation of Cooper, Harvey, Kennedy algo See 'A Simple,Fast Dominance Algorithm' Compute strict domniators ''' changed = True for n in nodes: n.dominators = set(nodes) while changed: changed = False for node in nodes: new_set = intersection_predecessor(node).union({node}) if new_set != node.dominators: node.dominators = new_set changed = True # compute immediate dominator for node in nodes: idom_candidates = set(node.dominators) idom_candidates.remove(node) for dominator in node.dominators: if dominator != node: [idom_candidates.remove(d) for d in dominator.dominators if d in idom_candidates and d!=dominator] assert len(idom_candidates)<=1 if idom_candidates: idom = idom_candidates.pop() node.immediate_dominator = idom idom.dominator_successors.add(node)
Naive implementation of Cooper, Harvey, Kennedy algo See 'A Simple,Fast Dominance Algorithm' Compute strict domniators
Below is the the instruction that describes the task: ### Input: Naive implementation of Cooper, Harvey, Kennedy algo See 'A Simple,Fast Dominance Algorithm' Compute strict domniators ### Response: def compute_dominators(nodes): ''' Naive implementation of Cooper, Harvey, Kennedy algo See 'A Simple,Fast Dominance Algorithm' Compute strict domniators ''' changed = True for n in nodes: n.dominators = set(nodes) while changed: changed = False for node in nodes: new_set = intersection_predecessor(node).union({node}) if new_set != node.dominators: node.dominators = new_set changed = True # compute immediate dominator for node in nodes: idom_candidates = set(node.dominators) idom_candidates.remove(node) for dominator in node.dominators: if dominator != node: [idom_candidates.remove(d) for d in dominator.dominators if d in idom_candidates and d!=dominator] assert len(idom_candidates)<=1 if idom_candidates: idom = idom_candidates.pop() node.immediate_dominator = idom idom.dominator_successors.add(node)
def export_hcurves_csv(ekey, dstore): """ Exports the hazard curves into several .csv files :param ekey: export key, i.e. a pair (datastore key, fmt) :param dstore: datastore object """ oq = dstore['oqparam'] info = get_info(dstore) rlzs_assoc = dstore['csm_info'].get_rlzs_assoc() R = len(rlzs_assoc.realizations) sitecol = dstore['sitecol'] sitemesh = get_mesh(sitecol) key, kind, fmt = get_kkf(ekey) fnames = [] checksum = dstore.get_attr('/', 'checksum32') hmap_dt = oq.hmap_dt() for kind in oq.get_kinds(kind, R): fname = hazard_curve_name(dstore, (key, fmt), kind, rlzs_assoc) comment = _comment(rlzs_assoc, kind, oq.investigation_time) if (key in ('hmaps', 'uhs') and oq.uniform_hazard_spectra or oq.hazard_maps): hmap = extract(dstore, 'hmaps?kind=' + kind)[kind] if key == 'uhs' and oq.poes and oq.uniform_hazard_spectra: uhs_curves = calc.make_uhs(hmap, info) writers.write_csv( fname, util.compose_arrays(sitemesh, uhs_curves), comment=comment + ', checksum=%d' % checksum) fnames.append(fname) elif key == 'hmaps' and oq.poes and oq.hazard_maps: fnames.extend( export_hmaps_csv(ekey, fname, sitemesh, hmap.flatten().view(hmap_dt), comment + ', checksum=%d' % checksum)) elif key == 'hcurves': hcurves = extract(dstore, 'hcurves?kind=' + kind)[kind] fnames.extend( export_hcurves_by_imt_csv( ekey, kind, rlzs_assoc, fname, sitecol, hcurves, oq, checksum)) return sorted(fnames)
Exports the hazard curves into several .csv files :param ekey: export key, i.e. a pair (datastore key, fmt) :param dstore: datastore object
Below is the the instruction that describes the task: ### Input: Exports the hazard curves into several .csv files :param ekey: export key, i.e. a pair (datastore key, fmt) :param dstore: datastore object ### Response: def export_hcurves_csv(ekey, dstore): """ Exports the hazard curves into several .csv files :param ekey: export key, i.e. a pair (datastore key, fmt) :param dstore: datastore object """ oq = dstore['oqparam'] info = get_info(dstore) rlzs_assoc = dstore['csm_info'].get_rlzs_assoc() R = len(rlzs_assoc.realizations) sitecol = dstore['sitecol'] sitemesh = get_mesh(sitecol) key, kind, fmt = get_kkf(ekey) fnames = [] checksum = dstore.get_attr('/', 'checksum32') hmap_dt = oq.hmap_dt() for kind in oq.get_kinds(kind, R): fname = hazard_curve_name(dstore, (key, fmt), kind, rlzs_assoc) comment = _comment(rlzs_assoc, kind, oq.investigation_time) if (key in ('hmaps', 'uhs') and oq.uniform_hazard_spectra or oq.hazard_maps): hmap = extract(dstore, 'hmaps?kind=' + kind)[kind] if key == 'uhs' and oq.poes and oq.uniform_hazard_spectra: uhs_curves = calc.make_uhs(hmap, info) writers.write_csv( fname, util.compose_arrays(sitemesh, uhs_curves), comment=comment + ', checksum=%d' % checksum) fnames.append(fname) elif key == 'hmaps' and oq.poes and oq.hazard_maps: fnames.extend( export_hmaps_csv(ekey, fname, sitemesh, hmap.flatten().view(hmap_dt), comment + ', checksum=%d' % checksum)) elif key == 'hcurves': hcurves = extract(dstore, 'hcurves?kind=' + kind)[kind] fnames.extend( export_hcurves_by_imt_csv( ekey, kind, rlzs_assoc, fname, sitecol, hcurves, oq, checksum)) return sorted(fnames)
def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ # Return the same namespace if there are no included namespaces. if not self._regex_map and not self._plain: return plain_target_ns src_name_set = self._reverse_plain.get(plain_target_ns) if src_name_set: # Return the first (and only) item in the set for src_name in src_name_set: return src_name # The target namespace could also exist in the wildcard namespaces for _, namespace in self._regex_map: original_name = match_replace_regex( namespace_to_regex(namespace.dest_name), plain_target_ns, namespace.source_name, ) if original_name: return original_name return None
Given a plain target namespace, return the corresponding source namespace.
Below is the the instruction that describes the task: ### Input: Given a plain target namespace, return the corresponding source namespace. ### Response: def unmap_namespace(self, plain_target_ns): """Given a plain target namespace, return the corresponding source namespace. """ # Return the same namespace if there are no included namespaces. if not self._regex_map and not self._plain: return plain_target_ns src_name_set = self._reverse_plain.get(plain_target_ns) if src_name_set: # Return the first (and only) item in the set for src_name in src_name_set: return src_name # The target namespace could also exist in the wildcard namespaces for _, namespace in self._regex_map: original_name = match_replace_regex( namespace_to_regex(namespace.dest_name), plain_target_ns, namespace.source_name, ) if original_name: return original_name return None