code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def wp_loop(self):
    '''close the loop on a mission'''
    # Presumably a MAVProxy waypoint-module method; relies on self.wploader,
    # self.master and the module-level mavutil import -- none visible here.
    loader = self.wploader
    if loader.count() < 2:
        # Need at least a home position and one mission item to loop.
        print("Not enough waypoints (%u)" % loader.count())
        return
    wp = loader.wp(loader.count()-2)
    if wp.command == mavutil.mavlink.MAV_CMD_DO_JUMP:
        print("Mission is already looped")
        return
    # Append a DO_JUMP back to waypoint 1 with repeat count -1
    # (presumably "repeat forever" -- confirm against the autopilot docs).
    wp = mavutil.mavlink.MAVLink_mission_item_message(
        0, 0, 0, 0, mavutil.mavlink.MAV_CMD_DO_JUMP,
        0, 1, 1, -1, 0, 0, 0, 0, 0)
    loader.add(wp)
    self.loading_waypoints = True
    self.loading_waypoint_lasttime = time.time()
    # Announce the new count to start the waypoint upload protocol.
    self.master.waypoint_count_send(self.wploader.count())
    print("Closed loop on mission")
close the loop on a mission
Below is the the instruction that describes the task: ### Input: close the loop on a mission ### Response: def wp_loop(self): '''close the loop on a mission''' loader = self.wploader if loader.count() < 2: print("Not enough waypoints (%u)" % loader.count()) return wp = loader.wp(loader.count()-2) if wp.command == mavutil.mavlink.MAV_CMD_DO_JUMP: print("Mission is already looped") return wp = mavutil.mavlink.MAVLink_mission_item_message(0, 0, 0, 0, mavutil.mavlink.MAV_CMD_DO_JUMP, 0, 1, 1, -1, 0, 0, 0, 0, 0) loader.add(wp) self.loading_waypoints = True self.loading_waypoint_lasttime = time.time() self.master.waypoint_count_send(self.wploader.count()) print("Closed loop on mission")
def conv3x3(in_planes, out_planes, fn, stride=1):
    """3x3 convolution with padding"""
    # Fixed 3x3 kernel with unit padding preserves spatial size at stride 1;
    # the convolution constructor itself is injected via `fn`.
    conv_kwargs = dict(kernel_size=3, stride=stride, padding=1, bias=False)
    return fn(in_planes, out_planes, **conv_kwargs)
3x3 convolution with padding
Below is the the instruction that describes the task: ### Input: 3x3 convolution with padding ### Response: def conv3x3(in_planes, out_planes, fn, stride=1): """3x3 convolution with padding""" return fn(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
def list_images(self):
    # type: () -> List[str]
    """
    List images stored in the registry.

    Returns:
        list[str]: List of image names.
    """
    # Docker Registry HTTP API v2 catalog endpoint.
    catalog_url = self.registry_url + '/v2/_catalog'
    response = self.get(catalog_url, auth=self.auth)
    return response.json()['repositories']
List images stored in the registry. Returns: list[str]: List of image names.
Below is the the instruction that describes the task: ### Input: List images stored in the registry. Returns: list[str]: List of image names. ### Response: def list_images(self): # type: () -> List[str] """ List images stored in the registry. Returns: list[str]: List of image names. """ r = self.get(self.registry_url + '/v2/_catalog', auth=self.auth) return r.json()['repositories']
def term_to_binary(term, compressed=False):
    """
    Encode Python types into Erlang terms in binary data
    """
    payload = _term_to_binary(term)
    if compressed is False:
        # Plain external term format: version tag followed by the payload.
        return b_chr(_TAG_VERSION) + payload
    if compressed is True:
        compressed = 6
    if compressed < 0 or compressed > 9:
        raise InputException('compressed in [0..9]')
    deflated = zlib.compress(payload, compressed)
    uncompressed_size = len(payload)
    if uncompressed_size > 4294967295:
        # The frame stores the uncompressed size as a big-endian uint32.
        raise OutputException('uint32 overflow')
    # Compressed framing: version tag, zlib tag, uncompressed size, data.
    return (
        b_chr(_TAG_VERSION) +
        b_chr(_TAG_COMPRESSED_ZLIB) +
        struct.pack(b'>I', uncompressed_size) +
        deflated
    )
Encode Python types into Erlang terms in binary data
Below is the the instruction that describes the task: ### Input: Encode Python types into Erlang terms in binary data ### Response: def term_to_binary(term, compressed=False): """ Encode Python types into Erlang terms in binary data """ data_uncompressed = _term_to_binary(term) if compressed is False: return b_chr(_TAG_VERSION) + data_uncompressed else: if compressed is True: compressed = 6 if compressed < 0 or compressed > 9: raise InputException('compressed in [0..9]') data_compressed = zlib.compress(data_uncompressed, compressed) size_uncompressed = len(data_uncompressed) if size_uncompressed > 4294967295: raise OutputException('uint32 overflow') return ( b_chr(_TAG_VERSION) + b_chr(_TAG_COMPRESSED_ZLIB) + struct.pack(b'>I', size_uncompressed) + data_compressed )
def extractParamMap(self, extra=None):
    """
    Extracts the embedded default param values and user-supplied values, and
    then merges them with extra values from input into a flat param map,
    where the latter value is used if there exist conflicts, i.e., with
    ordering: default param values < user-supplied values < extra.

    :param extra: extra param values
    :return: merged param map
    """
    if extra is None:
        extra = {}
    # Later updates win, giving the documented precedence ordering.
    merged = dict(self._defaultParamMap)
    merged.update(self._paramMap)
    merged.update(extra)
    return merged
Extracts the embedded default param values and user-supplied values, and then merges them with extra values from input into a flat param map, where the latter value is used if there exist conflicts, i.e., with ordering: default param values < user-supplied values < extra. :param extra: extra param values :return: merged param map
Below is the the instruction that describes the task: ### Input: Extracts the embedded default param values and user-supplied values, and then merges them with extra values from input into a flat param map, where the latter value is used if there exist conflicts, i.e., with ordering: default param values < user-supplied values < extra. :param extra: extra param values :return: merged param map ### Response: def extractParamMap(self, extra=None): """ Extracts the embedded default param values and user-supplied values, and then merges them with extra values from input into a flat param map, where the latter value is used if there exist conflicts, i.e., with ordering: default param values < user-supplied values < extra. :param extra: extra param values :return: merged param map """ if extra is None: extra = dict() paramMap = self._defaultParamMap.copy() paramMap.update(self._paramMap) paramMap.update(extra) return paramMap
def _int_to_words(self, pattern):
    """
    Given a bit-pattern expressed an integer number, return a sequence
    of the individual words that make up the pattern. The number of
    bits per word will be obtained from the internal SPI interface.
    """
    try:
        # NOTE(review): ceil(log2(pattern)) + 1 over-counts by one bit for
        # every pattern that is not an exact power of two (e.g. 255 yields
        # bits_required=9), which can emit a leading zero word where
        # pattern.bit_length() would not. Confirm whether that extra word
        # is intentional before changing.
        bits_required = int(ceil(log(pattern, 2))) + 1
    except ValueError:
        # pattern == 0 (technically speaking, no bits are required to
        # transmit the value zero ;)
        bits_required = 1
    # Most-significant word first: shift amounts descend by bits_per_word.
    shifts = range(0, bits_required, self._spi.bits_per_word)[::-1]
    mask = 2 ** self._spi.bits_per_word - 1
    return [(pattern >> shift) & mask for shift in shifts]
Given a bit-pattern expressed an integer number, return a sequence of the individual words that make up the pattern. The number of bits per word will be obtained from the internal SPI interface.
Below is the the instruction that describes the task: ### Input: Given a bit-pattern expressed an integer number, return a sequence of the individual words that make up the pattern. The number of bits per word will be obtained from the internal SPI interface. ### Response: def _int_to_words(self, pattern): """ Given a bit-pattern expressed an integer number, return a sequence of the individual words that make up the pattern. The number of bits per word will be obtained from the internal SPI interface. """ try: bits_required = int(ceil(log(pattern, 2))) + 1 except ValueError: # pattern == 0 (technically speaking, no bits are required to # transmit the value zero ;) bits_required = 1 shifts = range(0, bits_required, self._spi.bits_per_word)[::-1] mask = 2 ** self._spi.bits_per_word - 1 return [(pattern >> shift) & mask for shift in shifts]
def com_google_fonts_check_metadata_normal_style(ttFont, font_metadata):
    """METADATA.pb font.style "normal" matches font internals?"""
    # Generator-style fontbakery check: yields (status, message) pairs.
    from fontbakery.utils import get_name_entry_strings
    from fontbakery.constants import MacStyle
    if font_metadata.style != "normal":
        yield SKIP, "This check only applies to normal fonts."
        # FIXME: declare a common condition called "normal_style"
    else:
        font_familyname = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)
        font_fullname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)
        if len(font_familyname) == 0 or len(font_fullname) == 0:
            yield SKIP, ("Font lacks familyname and/or"
                         " fullname entries in name table.")
            # FIXME: This is the same SKIP condition as in
            #        check/metadata/italic_style so we definitely need to
            #        address them with a common condition!
        else:
            # Only the first name-table entry of each kind is examined.
            font_familyname = font_familyname[0]
            font_fullname = font_fullname[0]
            # macStyle ITALIC bit set contradicts the declared "normal" style.
            if bool(ttFont["head"].macStyle & MacStyle.ITALIC):
                yield FAIL, Message("bad-macstyle",
                                    ("METADATA.pb style has been set to normal"
                                     " but font macStyle is improperly set."))
            elif font_familyname.split("-")[-1].endswith('Italic'):
                yield FAIL, Message("familyname-italic",
                                    ("Font macStyle indicates a non-Italic font, but"
                                     " nameID {} (FONT_FAMILY_NAME: \"{}\") ends with"
                                     " \"Italic\".").format(NameID.FONT_FAMILY_NAME,
                                                            font_familyname))
            elif font_fullname.split("-")[-1].endswith("Italic"):
                yield FAIL, Message("fullfont-italic",
                                    ("Font macStyle indicates a non-Italic font but"
                                     " nameID {} (FULL_FONT_NAME: \"{}\") ends with"
                                     " \"Italic\".").format(NameID.FULL_FONT_NAME,
                                                            font_fullname))
            else:
                yield PASS, ("METADATA.pb font.style \"normal\""
                             " matches font internals.")
METADATA.pb font.style "normal" matches font internals?
Below is the the instruction that describes the task: ### Input: METADATA.pb font.style "normal" matches font internals? ### Response: def com_google_fonts_check_metadata_normal_style(ttFont, font_metadata): """METADATA.pb font.style "normal" matches font internals?""" from fontbakery.utils import get_name_entry_strings from fontbakery.constants import MacStyle if font_metadata.style != "normal": yield SKIP, "This check only applies to normal fonts." # FIXME: declare a common condition called "normal_style" else: font_familyname = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME) font_fullname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME) if len(font_familyname) == 0 or len(font_fullname) == 0: yield SKIP, ("Font lacks familyname and/or" " fullname entries in name table.") # FIXME: This is the same SKIP condition as in check/metadata/italic_style # so we definitely need to address them with a common condition! else: font_familyname = font_familyname[0] font_fullname = font_fullname[0] if bool(ttFont["head"].macStyle & MacStyle.ITALIC): yield FAIL, Message("bad-macstyle", ("METADATA.pb style has been set to normal" " but font macStyle is improperly set.")) elif font_familyname.split("-")[-1].endswith('Italic'): yield FAIL, Message("familyname-italic", ("Font macStyle indicates a non-Italic font, but" " nameID {} (FONT_FAMILY_NAME: \"{}\") ends with" " \"Italic\".").format(NameID.FONT_FAMILY_NAME, font_familyname)) elif font_fullname.split("-")[-1].endswith("Italic"): yield FAIL, Message("fullfont-italic", ("Font macStyle indicates a non-Italic font but" " nameID {} (FULL_FONT_NAME: \"{}\") ends with" " \"Italic\".").format(NameID.FULL_FONT_NAME, font_fullname)) else: yield PASS, ("METADATA.pb font.style \"normal\"" " matches font internals.")
def get_context_data(self, **kwargs):
    """
    Add filter form to the context.

    TODO: Currently we construct the filter form object twice - in
    get_queryset and here, in get_context_data. Will need to figure out a
    good way to eliminate extra initialization.
    """
    context = super(FilterFormMixin, self).get_context_data(**kwargs)
    filter_form = self.get_filter()
    context[self.context_filterform_name] = filter_form
    return context
Add filter form to the context. TODO: Currently we construct the filter form object twice - in get_queryset and here, in get_context_data. Will need to figure out a good way to eliminate extra initialization.
Below is the the instruction that describes the task: ### Input: Add filter form to the context. TODO: Currently we construct the filter form object twice - in get_queryset and here, in get_context_data. Will need to figure out a good way to eliminate extra initialization. ### Response: def get_context_data(self, **kwargs): """ Add filter form to the context. TODO: Currently we construct the filter form object twice - in get_queryset and here, in get_context_data. Will need to figure out a good way to eliminate extra initialization. """ context = super(FilterFormMixin, self).get_context_data(**kwargs) context[self.context_filterform_name] = self.get_filter() return context
def send(self, xso):
    """
    Send a single XML stream object.

    :param xso: Object to serialise and send.
    :type xso: :class:`aioxmpp.xso.XSO`
    :raises Exception: from any serialisation errors, usually
        :class:`ValueError`.

    Serialise the `xso` and send it over the stream.

    If any serialisation error occurs, no data is sent over the stream and
    the exception is re-raised; the :meth:`send` method thus provides strong
    exception safety.

    .. warning::

       The behaviour of :meth:`send` after :meth:`abort` or :meth:`close`
       and before :meth:`start` is undefined.
    """
    # Buffering means nothing reaches the transport until the whole XSO
    # serialises successfully -- this provides the strong exception safety
    # promised above.
    with self._writer.buffer():
        xso.unparse_to_sax(self._writer)
Send a single XML stream object. :param xso: Object to serialise and send. :type xso: :class:`aioxmpp.xso.XSO` :raises Exception: from any serialisation errors, usually :class:`ValueError`. Serialise the `xso` and send it over the stream. If any serialisation error occurs, no data is sent over the stream and the exception is re-raised; the :meth:`send` method thus provides strong exception safety. .. warning:: The behaviour of :meth:`send` after :meth:`abort` or :meth:`close` and before :meth:`start` is undefined.
Below is the the instruction that describes the task: ### Input: Send a single XML stream object. :param xso: Object to serialise and send. :type xso: :class:`aioxmpp.xso.XSO` :raises Exception: from any serialisation errors, usually :class:`ValueError`. Serialise the `xso` and send it over the stream. If any serialisation error occurs, no data is sent over the stream and the exception is re-raised; the :meth:`send` method thus provides strong exception safety. .. warning:: The behaviour of :meth:`send` after :meth:`abort` or :meth:`close` and before :meth:`start` is undefined. ### Response: def send(self, xso): """ Send a single XML stream object. :param xso: Object to serialise and send. :type xso: :class:`aioxmpp.xso.XSO` :raises Exception: from any serialisation errors, usually :class:`ValueError`. Serialise the `xso` and send it over the stream. If any serialisation error occurs, no data is sent over the stream and the exception is re-raised; the :meth:`send` method thus provides strong exception safety. .. warning:: The behaviour of :meth:`send` after :meth:`abort` or :meth:`close` and before :meth:`start` is undefined. """ with self._writer.buffer(): xso.unparse_to_sax(self._writer)
def start_dashboard(redis_address, temp_dir, stdout_file=None,
                    stderr_file=None, redis_password=None):
    """Start a dashboard process.

    Args:
        redis_address (str): The address of the Redis instance.
        temp_dir (str): The temporary directory used for log files and
            information for this Ray session.
        stdout_file: A file handle opened for writing to redirect stdout
            to. If no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr
            to. If no redirection should happen, then this should be None.
        redis_password (str): The password of the redis server.

    Returns:
        A (dashboard_url, ProcessInfo) tuple for the process that was
        started, or (None, None) on Python 2 where the dashboard is
        unsupported.

    Raises:
        ImportError: if aiohttp or psutil is not installed.
    """
    # Probe for the first free port at or above 8080.
    # NOTE(review): closing the probe socket before the dashboard binds the
    # port is a TOCTOU race -- another process may grab the port in between.
    port = 8080
    while True:
        try:
            port_test_socket = socket.socket()
            port_test_socket.bind(("127.0.0.1", port))
            port_test_socket.close()
            break
        except socket.error:
            port += 1
    # Random access token embedded in the dashboard URL.
    token = ray.utils.decode(binascii.hexlify(os.urandom(24)))
    dashboard_filepath = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "dashboard/dashboard.py")
    command = [
        sys.executable,
        "-u",
        dashboard_filepath,
        "--redis-address={}".format(redis_address),
        "--http-port={}".format(port),
        "--token={}".format(token),
        "--temp-dir={}".format(temp_dir),
    ]
    if redis_password:
        command += ["--redis-password", redis_password]
    # The dashboard is Python-3-only; silently skip it on Python 2.
    if sys.version_info <= (3, 0):
        return None, None
    try:
        import aiohttp  # noqa: F401
        import psutil  # noqa: F401
    except ImportError:
        raise ImportError(
            "Failed to start the dashboard. The dashboard requires Python 3 "
            "as well as 'pip install aiohttp psutil'.")
    process_info = start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_DASHBOARD,
        stdout_file=stdout_file,
        stderr_file=stderr_file)
    dashboard_url = "http://{}:{}/?token={}".format(
        ray.services.get_node_ip_address(), port, token)
    print("\n" + "=" * 70)
    print("View the dashboard at {}".format(dashboard_url))
    print("=" * 70 + "\n")
    return dashboard_url, process_info
Start a dashboard process. Args: redis_address (str): The address of the Redis instance. temp_dir (str): The temporary directory used for log files and information for this Ray session. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. Returns: ProcessInfo for the process that was started.
Below is the the instruction that describes the task: ### Input: Start a dashboard process. Args: redis_address (str): The address of the Redis instance. temp_dir (str): The temporary directory used for log files and information for this Ray session. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. Returns: ProcessInfo for the process that was started. ### Response: def start_dashboard(redis_address, temp_dir, stdout_file=None, stderr_file=None, redis_password=None): """Start a dashboard process. Args: redis_address (str): The address of the Redis instance. temp_dir (str): The temporary directory used for log files and information for this Ray session. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. redis_password (str): The password of the redis server. Returns: ProcessInfo for the process that was started. 
""" port = 8080 while True: try: port_test_socket = socket.socket() port_test_socket.bind(("127.0.0.1", port)) port_test_socket.close() break except socket.error: port += 1 token = ray.utils.decode(binascii.hexlify(os.urandom(24))) dashboard_filepath = os.path.join( os.path.dirname(os.path.abspath(__file__)), "dashboard/dashboard.py") command = [ sys.executable, "-u", dashboard_filepath, "--redis-address={}".format(redis_address), "--http-port={}".format(port), "--token={}".format(token), "--temp-dir={}".format(temp_dir), ] if redis_password: command += ["--redis-password", redis_password] if sys.version_info <= (3, 0): return None, None try: import aiohttp # noqa: F401 import psutil # noqa: F401 except ImportError: raise ImportError( "Failed to start the dashboard. The dashboard requires Python 3 " "as well as 'pip install aiohttp psutil'.") process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_DASHBOARD, stdout_file=stdout_file, stderr_file=stderr_file) dashboard_url = "http://{}:{}/?token={}".format( ray.services.get_node_ip_address(), port, token) print("\n" + "=" * 70) print("View the dashboard at {}".format(dashboard_url)) print("=" * 70 + "\n") return dashboard_url, process_info
def phase_from_frequencyseries(htilde, remove_start_phase=True):
    """Returns the phase from the given frequency-domain waveform. This
    assumes that the waveform has been sampled finely enough that the phase
    cannot change by more than pi radians between each step.

    Parameters
    ----------
    htilde : FrequencySeries
        The waveform to get the phase for; must be a complex frequency
        series.
    remove_start_phase : {True, bool}
        Subtract the initial phase before returning.

    Returns
    -------
    FrequencySeries
        The phase of the waveform as a function of frequency.
    """
    # Unwrap removes the 2*pi jumps that numpy.angle introduces.
    raw_angles = numpy.angle(htilde.data)
    phase = numpy.unwrap(raw_angles).astype(real_same_precision_as(htilde))
    if remove_start_phase:
        # Normalise so the phase starts at zero.
        phase -= phase[0]
    return FrequencySeries(phase, delta_f=htilde.delta_f,
                           epoch=htilde.epoch, copy=False)
Returns the phase from the given frequency-domain waveform. This assumes that the waveform has been sampled finely enough that the phase cannot change by more than pi radians between each step. Parameters ---------- htilde : FrequencySeries The waveform to get the phase for; must be a complex frequency series. remove_start_phase : {True, bool} Subtract the initial phase before returning. Returns ------- FrequencySeries The phase of the waveform as a function of frequency.
Below is the the instruction that describes the task: ### Input: Returns the phase from the given frequency-domain waveform. This assumes that the waveform has been sampled finely enough that the phase cannot change by more than pi radians between each step. Parameters ---------- htilde : FrequencySeries The waveform to get the phase for; must be a complex frequency series. remove_start_phase : {True, bool} Subtract the initial phase before returning. Returns ------- FrequencySeries The phase of the waveform as a function of frequency. ### Response: def phase_from_frequencyseries(htilde, remove_start_phase=True): """Returns the phase from the given frequency-domain waveform. This assumes that the waveform has been sampled finely enough that the phase cannot change by more than pi radians between each step. Parameters ---------- htilde : FrequencySeries The waveform to get the phase for; must be a complex frequency series. remove_start_phase : {True, bool} Subtract the initial phase before returning. Returns ------- FrequencySeries The phase of the waveform as a function of frequency. """ p = numpy.unwrap(numpy.angle(htilde.data)).astype( real_same_precision_as(htilde)) if remove_start_phase: p += -p[0] return FrequencySeries(p, delta_f=htilde.delta_f, epoch=htilde.epoch, copy=False)
def func_globals_inject(func, **overrides): ''' Override specific variables within a function's global context. ''' # recognize methods if hasattr(func, 'im_func'): func = func.__func__ # Get a reference to the function globals dictionary func_globals = func.__globals__ # Save the current function globals dictionary state values for the # overridden objects injected_func_globals = [] overridden_func_globals = {} for override in overrides: if override in func_globals: overridden_func_globals[override] = func_globals[override] else: injected_func_globals.append(override) # Override the function globals with what's passed in the above overrides func_globals.update(overrides) # The context is now ready to be used yield # We're now done with the context # Restore the overwritten function globals func_globals.update(overridden_func_globals) # Remove any entry injected in the function globals for injected in injected_func_globals: del func_globals[injected]
Override specific variables within a function's global context.
Below is the the instruction that describes the task: ### Input: Override specific variables within a function's global context. ### Response: def func_globals_inject(func, **overrides): ''' Override specific variables within a function's global context. ''' # recognize methods if hasattr(func, 'im_func'): func = func.__func__ # Get a reference to the function globals dictionary func_globals = func.__globals__ # Save the current function globals dictionary state values for the # overridden objects injected_func_globals = [] overridden_func_globals = {} for override in overrides: if override in func_globals: overridden_func_globals[override] = func_globals[override] else: injected_func_globals.append(override) # Override the function globals with what's passed in the above overrides func_globals.update(overrides) # The context is now ready to be used yield # We're now done with the context # Restore the overwritten function globals func_globals.update(overridden_func_globals) # Remove any entry injected in the function globals for injected in injected_func_globals: del func_globals[injected]
def n_frames_total(self, stride=1, skip=0):
    r"""Returns total number of frames.

    Parameters
    ----------
    stride : int
        return value is the number of frames in trajectories when
        running through them with a step size of `stride`.
    skip : int, default=0
        skip the first initial n frames per trajectory.

    Returns
    -------
    n_frames_total : int
        total number of frames.
    """
    if IteratorState.is_uniform_stride(stride):
        return sum(self.trajectory_lengths(stride=stride, skip=skip))
    # A non-uniform stride enumerates explicit frames, so its length is
    # the frame count.
    return len(stride)
r"""Returns total number of frames. Parameters ---------- stride : int return value is the number of frames in trajectories when running through them with a step size of `stride`. skip : int, default=0 skip the first initial n frames per trajectory. Returns ------- n_frames_total : int total number of frames.
Below is the the instruction that describes the task: ### Input: r"""Returns total number of frames. Parameters ---------- stride : int return value is the number of frames in trajectories when running through them with a step size of `stride`. skip : int, default=0 skip the first initial n frames per trajectory. Returns ------- n_frames_total : int total number of frames. ### Response: def n_frames_total(self, stride=1, skip=0): r"""Returns total number of frames. Parameters ---------- stride : int return value is the number of frames in trajectories when running through them with a step size of `stride`. skip : int, default=0 skip the first initial n frames per trajectory. Returns ------- n_frames_total : int total number of frames. """ if not IteratorState.is_uniform_stride(stride): return len(stride) return sum(self.trajectory_lengths(stride=stride, skip=skip))
def populate(projects_to_filter=None, group=None):
    """
    Populate the list of projects that belong to this experiment.

    Args:
        projects_to_filter (list(Project)):
            List of projects we want to assign to this experiment.
            We intersect the list of projects with the list of supported
            projects to get the list of projects that belong to this
            experiment.
        group (list(str)):
            In addition to the project filter, we provide a way to filter
            whole groups.

    Returns:
        dict: mapping of project name to project class, excluding "debug"
        domain projects unless they were explicitly requested.
    """
    if projects_to_filter is None:
        projects_to_filter = []

    import benchbuild.projects as all_projects
    # Trigger plugin discovery so ProjectRegistry is fully populated.
    all_projects.discover()

    prjs = ProjectRegistry.projects
    if projects_to_filter:
        prjs = {}
        for filter_project in set(projects_to_filter):
            try:
                # The registry supports prefix lookups; a miss raises
                # KeyError, which is deliberately ignored (best-effort).
                prjs.update({
                    x: y
                    for x, y in ProjectRegistry.projects.items(
                        prefix=filter_project)
                })
            except KeyError:
                pass

    if group:
        groupkeys = set(group)
        prjs = {
            name: cls
            for name, cls in prjs.items() if cls.GROUP in groupkeys
        }

    # Debug-domain projects are hidden unless explicitly requested.
    return {
        x: prjs[x]
        for x in prjs if prjs[x].DOMAIN != "debug" or x in projects_to_filter
    }
Populate the list of projects that belong to this experiment. Args: projects_to_filter (list(Project)): List of projects we want to assign to this experiment. We intersect the list of projects with the list of supported projects to get the list of projects that belong to this experiment. group (list(str)): In addition to the project filter, we provide a way to filter whole groups.
Below is the the instruction that describes the task: ### Input: Populate the list of projects that belong to this experiment. Args: projects_to_filter (list(Project)): List of projects we want to assign to this experiment. We intersect the list of projects with the list of supported projects to get the list of projects that belong to this experiment. group (list(str)): In addition to the project filter, we provide a way to filter whole groups. ### Response: def populate(projects_to_filter=None, group=None): """ Populate the list of projects that belong to this experiment. Args: projects_to_filter (list(Project)): List of projects we want to assign to this experiment. We intersect the list of projects with the list of supported projects to get the list of projects that belong to this experiment. group (list(str)): In addition to the project filter, we provide a way to filter whole groups. """ if projects_to_filter is None: projects_to_filter = [] import benchbuild.projects as all_projects all_projects.discover() prjs = ProjectRegistry.projects if projects_to_filter: prjs = {} for filter_project in set(projects_to_filter): try: prjs.update({ x: y for x, y in ProjectRegistry.projects.items( prefix=filter_project) }) except KeyError: pass if group: groupkeys = set(group) prjs = { name: cls for name, cls in prjs.items() if cls.GROUP in groupkeys } return { x: prjs[x] for x in prjs if prjs[x].DOMAIN != "debug" or x in projects_to_filter }
def copy_submission_locally(self, cloud_path):
    """Copies submission from Google Cloud Storage to local directory.

    Args:
      cloud_path: path of the submission in Google Cloud Storage

    Returns:
      name of the local file where submission is copied to
    """
    filename = os.path.basename(cloud_path)
    local_path = os.path.join(self.download_dir, filename)
    # Shell out to gsutil; a non-zero exit status means the copy failed.
    copy_cmd = ['gsutil', 'cp', cloud_path, local_path]
    if subprocess.call(copy_cmd) == 0:
        return local_path
    logging.error('Can\'t copy submission locally')
    return None
Copies submission from Google Cloud Storage to local directory. Args: cloud_path: path of the submission in Google Cloud Storage Returns: name of the local file where submission is copied to
Below is the the instruction that describes the task: ### Input: Copies submission from Google Cloud Storage to local directory. Args: cloud_path: path of the submission in Google Cloud Storage Returns: name of the local file where submission is copied to ### Response: def copy_submission_locally(self, cloud_path): """Copies submission from Google Cloud Storage to local directory. Args: cloud_path: path of the submission in Google Cloud Storage Returns: name of the local file where submission is copied to """ local_path = os.path.join(self.download_dir, os.path.basename(cloud_path)) cmd = ['gsutil', 'cp', cloud_path, local_path] if subprocess.call(cmd) != 0: logging.error('Can\'t copy submission locally') return None return local_path
def calculate(self, order, transaction):
    """
    Pay commission based on dollar value of shares.
    """
    # Per-share commission scales with the share price.
    per_share_fee = transaction.price * self.cost_per_dollar
    share_count = abs(transaction.amount)
    return share_count * per_share_fee
Pay commission based on dollar value of shares.
Below is the the instruction that describes the task: ### Input: Pay commission based on dollar value of shares. ### Response: def calculate(self, order, transaction): """ Pay commission based on dollar value of shares. """ cost_per_share = transaction.price * self.cost_per_dollar return abs(transaction.amount) * cost_per_share
def get_ntlmv2_response(domain, user, password, server_challenge, client_challenge, timestamp, target_info): """ [MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication Computes an appropriate NTLMv2 response. The algorithm is based on jCIFS and the ComputeResponse() implementation the protocol documentation. Note: The MS ComputeResponse() implementation refers to a variable called ServerName, this is for historical reasons and is misleading. ServerName refers to the bytes that compose the AV_PAIRS structure called target_info. The reserved constants below are defined in the documentation :param response_key: The return value from NTOWF() :param server_challenge: The 8-byte challenge message generated by the server :param client_challenge: The 8-byte challenge message generated by the client :param timestamp: The 8-byte little-endian time in GMT :param target_info: The AttributeValuePairs structure to be returned to the server :return: NTLMv2 Response """ lo_response_version = b'\x01' hi_response_version = b'\x01' reserved_dword = b'\x00' * 4 reserved_bytes = b'\x00' * 6 response_key = PasswordAuthentication.ntowfv2(domain, user, password) proof_material = lo_response_version proof_material += hi_response_version proof_material += reserved_bytes proof_material += timestamp proof_material += client_challenge proof_material += reserved_dword proof_material += target_info.get_data() proof_material += reserved_dword proof = PasswordAuthentication._compute_response(response_key, server_challenge, proof_material) # The master session key derivation session_key = hmac.HMAC(response_key, hashes.MD5(), backend=default_backend()) session_key.update(proof) session_master_key = session_key.finalize() return proof + proof_material, session_master_key, target_info
[MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication Computes an appropriate NTLMv2 response. The algorithm is based on jCIFS and the ComputeResponse() implementation the protocol documentation. Note: The MS ComputeResponse() implementation refers to a variable called ServerName, this is for historical reasons and is misleading. ServerName refers to the bytes that compose the AV_PAIRS structure called target_info. The reserved constants below are defined in the documentation :param response_key: The return value from NTOWF() :param server_challenge: The 8-byte challenge message generated by the server :param client_challenge: The 8-byte challenge message generated by the client :param timestamp: The 8-byte little-endian time in GMT :param target_info: The AttributeValuePairs structure to be returned to the server :return: NTLMv2 Response
Below is the the instruction that describes the task: ### Input: [MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication Computes an appropriate NTLMv2 response. The algorithm is based on jCIFS and the ComputeResponse() implementation the protocol documentation. Note: The MS ComputeResponse() implementation refers to a variable called ServerName, this is for historical reasons and is misleading. ServerName refers to the bytes that compose the AV_PAIRS structure called target_info. The reserved constants below are defined in the documentation :param response_key: The return value from NTOWF() :param server_challenge: The 8-byte challenge message generated by the server :param client_challenge: The 8-byte challenge message generated by the client :param timestamp: The 8-byte little-endian time in GMT :param target_info: The AttributeValuePairs structure to be returned to the server :return: NTLMv2 Response ### Response: def get_ntlmv2_response(domain, user, password, server_challenge, client_challenge, timestamp, target_info): """ [MS-NLMP] v20140502 NT LAN Manager (NTLM) Authentication Protocol 3.3.2 NTLM v2 Authentication Computes an appropriate NTLMv2 response. The algorithm is based on jCIFS and the ComputeResponse() implementation the protocol documentation. Note: The MS ComputeResponse() implementation refers to a variable called ServerName, this is for historical reasons and is misleading. ServerName refers to the bytes that compose the AV_PAIRS structure called target_info. 
The reserved constants below are defined in the documentation :param response_key: The return value from NTOWF() :param server_challenge: The 8-byte challenge message generated by the server :param client_challenge: The 8-byte challenge message generated by the client :param timestamp: The 8-byte little-endian time in GMT :param target_info: The AttributeValuePairs structure to be returned to the server :return: NTLMv2 Response """ lo_response_version = b'\x01' hi_response_version = b'\x01' reserved_dword = b'\x00' * 4 reserved_bytes = b'\x00' * 6 response_key = PasswordAuthentication.ntowfv2(domain, user, password) proof_material = lo_response_version proof_material += hi_response_version proof_material += reserved_bytes proof_material += timestamp proof_material += client_challenge proof_material += reserved_dword proof_material += target_info.get_data() proof_material += reserved_dword proof = PasswordAuthentication._compute_response(response_key, server_challenge, proof_material) # The master session key derivation session_key = hmac.HMAC(response_key, hashes.MD5(), backend=default_backend()) session_key.update(proof) session_master_key = session_key.finalize() return proof + proof_material, session_master_key, target_info
def get_prediction_results(model_dir_or_id, data, headers, img_cols=None, cloud=False, with_source=True, show_image=True): """ Predict with a specified model. It predicts with the model, join source data with prediction results, and formats the results so they can be displayed nicely in Datalab. Args: model_dir_or_id: The model directory if cloud is False, or model.version if cloud is True. data: Can be a list of dictionaries, a list of csv lines, or a Pandas DataFrame. If it is not a list of csv lines, data will be converted to csv lines first, using the orders specified by headers and then send to model. For images, it can be image gs urls or in-memory PIL images. Images will be converted to base64 encoded strings before prediction. headers: the column names of data. It specifies the order of the columns when serializing to csv lines for prediction. img_cols: The image url columns. If specified, the img_urls will be converted to base64 encoded image bytes. with_source: Whether return a joined prediction source and prediction results, or prediction results only. show_image: When displaying prediction source, whether to add a column of image bytes for each image url column. Returns: A dataframe of joined prediction source and prediction results, or prediction results only. """ if img_cols is None: img_cols = [] if isinstance(data, pd.DataFrame): data = list(data.T.to_dict().values()) elif isinstance(data[0], six.string_types): data = list(csv.DictReader(data, fieldnames=headers)) images = _download_images(data, img_cols) predict_data = _get_predicton_csv_lines(data, headers, images) if cloud: parts = model_dir_or_id.split('.') if len(parts) != 2: raise ValueError('Invalid model name for cloud prediction. 
Use "model.version".') predict_results = ml.ModelVersions(parts[0]).predict(parts[1], predict_data) else: tf_logging_level = logging.getLogger("tensorflow").level logging.getLogger("tensorflow").setLevel(logging.WARNING) try: predict_results = _tf_predict(model_dir_or_id, predict_data) finally: logging.getLogger("tensorflow").setLevel(tf_logging_level) df_r = pd.DataFrame(predict_results) if not with_source: return df_r display_data = data if show_image: display_data = _get_display_data_with_images(data, images) df_s = pd.DataFrame(display_data) df = pd.concat([df_r, df_s], axis=1) # Remove duplicate columns. All 'key' columns are duplicate here. df = df.loc[:, ~df.columns.duplicated()] return df
Predict with a specified model. It predicts with the model, join source data with prediction results, and formats the results so they can be displayed nicely in Datalab. Args: model_dir_or_id: The model directory if cloud is False, or model.version if cloud is True. data: Can be a list of dictionaries, a list of csv lines, or a Pandas DataFrame. If it is not a list of csv lines, data will be converted to csv lines first, using the orders specified by headers and then send to model. For images, it can be image gs urls or in-memory PIL images. Images will be converted to base64 encoded strings before prediction. headers: the column names of data. It specifies the order of the columns when serializing to csv lines for prediction. img_cols: The image url columns. If specified, the img_urls will be converted to base64 encoded image bytes. with_source: Whether return a joined prediction source and prediction results, or prediction results only. show_image: When displaying prediction source, whether to add a column of image bytes for each image url column. Returns: A dataframe of joined prediction source and prediction results, or prediction results only.
Below is the the instruction that describes the task: ### Input: Predict with a specified model. It predicts with the model, join source data with prediction results, and formats the results so they can be displayed nicely in Datalab. Args: model_dir_or_id: The model directory if cloud is False, or model.version if cloud is True. data: Can be a list of dictionaries, a list of csv lines, or a Pandas DataFrame. If it is not a list of csv lines, data will be converted to csv lines first, using the orders specified by headers and then send to model. For images, it can be image gs urls or in-memory PIL images. Images will be converted to base64 encoded strings before prediction. headers: the column names of data. It specifies the order of the columns when serializing to csv lines for prediction. img_cols: The image url columns. If specified, the img_urls will be converted to base64 encoded image bytes. with_source: Whether return a joined prediction source and prediction results, or prediction results only. show_image: When displaying prediction source, whether to add a column of image bytes for each image url column. Returns: A dataframe of joined prediction source and prediction results, or prediction results only. ### Response: def get_prediction_results(model_dir_or_id, data, headers, img_cols=None, cloud=False, with_source=True, show_image=True): """ Predict with a specified model. It predicts with the model, join source data with prediction results, and formats the results so they can be displayed nicely in Datalab. Args: model_dir_or_id: The model directory if cloud is False, or model.version if cloud is True. data: Can be a list of dictionaries, a list of csv lines, or a Pandas DataFrame. If it is not a list of csv lines, data will be converted to csv lines first, using the orders specified by headers and then send to model. For images, it can be image gs urls or in-memory PIL images. Images will be converted to base64 encoded strings before prediction. 
headers: the column names of data. It specifies the order of the columns when serializing to csv lines for prediction. img_cols: The image url columns. If specified, the img_urls will be converted to base64 encoded image bytes. with_source: Whether return a joined prediction source and prediction results, or prediction results only. show_image: When displaying prediction source, whether to add a column of image bytes for each image url column. Returns: A dataframe of joined prediction source and prediction results, or prediction results only. """ if img_cols is None: img_cols = [] if isinstance(data, pd.DataFrame): data = list(data.T.to_dict().values()) elif isinstance(data[0], six.string_types): data = list(csv.DictReader(data, fieldnames=headers)) images = _download_images(data, img_cols) predict_data = _get_predicton_csv_lines(data, headers, images) if cloud: parts = model_dir_or_id.split('.') if len(parts) != 2: raise ValueError('Invalid model name for cloud prediction. Use "model.version".') predict_results = ml.ModelVersions(parts[0]).predict(parts[1], predict_data) else: tf_logging_level = logging.getLogger("tensorflow").level logging.getLogger("tensorflow").setLevel(logging.WARNING) try: predict_results = _tf_predict(model_dir_or_id, predict_data) finally: logging.getLogger("tensorflow").setLevel(tf_logging_level) df_r = pd.DataFrame(predict_results) if not with_source: return df_r display_data = data if show_image: display_data = _get_display_data_with_images(data, images) df_s = pd.DataFrame(display_data) df = pd.concat([df_r, df_s], axis=1) # Remove duplicate columns. All 'key' columns are duplicate here. df = df.loc[:, ~df.columns.duplicated()] return df
def get_history_item_for_tree_iter(self, child_tree_iter): """Hands history item for tree iter and compensate if tree item is a dummy item :param Gtk.TreeIter child_tree_iter: Tree iter of row :rtype rafcon.core.execution.execution_history.HistoryItem: :return history tree item: """ history_item = self.history_tree_store[child_tree_iter][self.HISTORY_ITEM_STORAGE_ID] if history_item is None: # is dummy item if self.history_tree_store.iter_n_children(child_tree_iter) > 0: child_iter = self.history_tree_store.iter_nth_child(child_tree_iter, 0) history_item = self.history_tree_store[child_iter][self.HISTORY_ITEM_STORAGE_ID] else: logger.debug("In a dummy history should be respective real call element.") return history_item
Hands history item for tree iter and compensate if tree item is a dummy item :param Gtk.TreeIter child_tree_iter: Tree iter of row :rtype rafcon.core.execution.execution_history.HistoryItem: :return history tree item:
Below is the the instruction that describes the task: ### Input: Hands history item for tree iter and compensate if tree item is a dummy item :param Gtk.TreeIter child_tree_iter: Tree iter of row :rtype rafcon.core.execution.execution_history.HistoryItem: :return history tree item: ### Response: def get_history_item_for_tree_iter(self, child_tree_iter): """Hands history item for tree iter and compensate if tree item is a dummy item :param Gtk.TreeIter child_tree_iter: Tree iter of row :rtype rafcon.core.execution.execution_history.HistoryItem: :return history tree item: """ history_item = self.history_tree_store[child_tree_iter][self.HISTORY_ITEM_STORAGE_ID] if history_item is None: # is dummy item if self.history_tree_store.iter_n_children(child_tree_iter) > 0: child_iter = self.history_tree_store.iter_nth_child(child_tree_iter, 0) history_item = self.history_tree_store[child_iter][self.HISTORY_ITEM_STORAGE_ID] else: logger.debug("In a dummy history should be respective real call element.") return history_item
def itable(*args, **kwargs): ''' itable(...) yields a new immutable table object from the given set of arguments. The arguments may be any number of maps or itables followed by any number of keyword arguments. All the entries from the arguments and keywords are collapsed left-to-right (respecting laziness), and the resulting column set is returned as the itable. Arguments and maps may contain values that are functions of zero arguments; these are considered lazy values and are not evaluated by the itable function. ''' # a couple things we want to check first... does our argument list reduce to just an empty # itable or just a single itable? if len(args) == 0 and len(kwargs) == 0: return ITable({}, n=0) elif len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], ITable): return args[0] # we want to try to convert any arguments we can from datatables into maps try: import pandas args = [{k:a[k].values for k in a.keys()} if isinstance(a, pandas.DataFrame) else a for a in args] except: pass # now we want to merge these together and make them one lazy map m0 = lazy_map(merge(args, kwargs)) # see if we can deduce the row size from a non-lazy argument: v = next((m0[k] for k in six.iterkeys(m0) if not m0.is_lazy(k)), None) if is_lazy_map(m0) else \ None if v is None: k = next((u for u in six.iterkeys(m0)), m0) if k is m0: v = None else: v = m0[k] return ITable(m0, n=(None if v is None else len(v)))
itable(...) yields a new immutable table object from the given set of arguments. The arguments may be any number of maps or itables followed by any number of keyword arguments. All the entries from the arguments and keywords are collapsed left-to-right (respecting laziness), and the resulting column set is returned as the itable. Arguments and maps may contain values that are functions of zero arguments; these are considered lazy values and are not evaluated by the itable function.
Below is the the instruction that describes the task: ### Input: itable(...) yields a new immutable table object from the given set of arguments. The arguments may be any number of maps or itables followed by any number of keyword arguments. All the entries from the arguments and keywords are collapsed left-to-right (respecting laziness), and the resulting column set is returned as the itable. Arguments and maps may contain values that are functions of zero arguments; these are considered lazy values and are not evaluated by the itable function. ### Response: def itable(*args, **kwargs): ''' itable(...) yields a new immutable table object from the given set of arguments. The arguments may be any number of maps or itables followed by any number of keyword arguments. All the entries from the arguments and keywords are collapsed left-to-right (respecting laziness), and the resulting column set is returned as the itable. Arguments and maps may contain values that are functions of zero arguments; these are considered lazy values and are not evaluated by the itable function. ''' # a couple things we want to check first... does our argument list reduce to just an empty # itable or just a single itable? if len(args) == 0 and len(kwargs) == 0: return ITable({}, n=0) elif len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], ITable): return args[0] # we want to try to convert any arguments we can from datatables into maps try: import pandas args = [{k:a[k].values for k in a.keys()} if isinstance(a, pandas.DataFrame) else a for a in args] except: pass # now we want to merge these together and make them one lazy map m0 = lazy_map(merge(args, kwargs)) # see if we can deduce the row size from a non-lazy argument: v = next((m0[k] for k in six.iterkeys(m0) if not m0.is_lazy(k)), None) if is_lazy_map(m0) else \ None if v is None: k = next((u for u in six.iterkeys(m0)), m0) if k is m0: v = None else: v = m0[k] return ITable(m0, n=(None if v is None else len(v)))
def create(cls, statement_format, date_start, date_end, monetary_account_id=None, regional_format=None, custom_headers=None): """ :type user_id: int :type monetary_account_id: int :param statement_format: The format type of statement. Allowed values: MT940, CSV, PDF. :type statement_format: str :param date_start: The start date for making statements. :type date_start: str :param date_end: The end date for making statements. :type date_end: str :param regional_format: Required for CSV exports. The regional format of the statement, can be UK_US (comma-separated) or EUROPEAN (semicolon-separated). :type regional_format: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt """ if custom_headers is None: custom_headers = {} request_map = { cls.FIELD_STATEMENT_FORMAT: statement_format, cls.FIELD_DATE_START: date_start, cls.FIELD_DATE_END: date_end, cls.FIELD_REGIONAL_FORMAT: regional_format } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) api_client = client.ApiClient(cls._get_api_context()) request_bytes = request_map_string.encode() endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id(), cls._determine_monetary_account_id( monetary_account_id)) response_raw = api_client.post(endpoint_url, request_bytes, custom_headers) return BunqResponseInt.cast_from_bunq_response( cls._process_for_id(response_raw) )
:type user_id: int :type monetary_account_id: int :param statement_format: The format type of statement. Allowed values: MT940, CSV, PDF. :type statement_format: str :param date_start: The start date for making statements. :type date_start: str :param date_end: The end date for making statements. :type date_end: str :param regional_format: Required for CSV exports. The regional format of the statement, can be UK_US (comma-separated) or EUROPEAN (semicolon-separated). :type regional_format: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt
Below is the the instruction that describes the task: ### Input: :type user_id: int :type monetary_account_id: int :param statement_format: The format type of statement. Allowed values: MT940, CSV, PDF. :type statement_format: str :param date_start: The start date for making statements. :type date_start: str :param date_end: The end date for making statements. :type date_end: str :param regional_format: Required for CSV exports. The regional format of the statement, can be UK_US (comma-separated) or EUROPEAN (semicolon-separated). :type regional_format: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt ### Response: def create(cls, statement_format, date_start, date_end, monetary_account_id=None, regional_format=None, custom_headers=None): """ :type user_id: int :type monetary_account_id: int :param statement_format: The format type of statement. Allowed values: MT940, CSV, PDF. :type statement_format: str :param date_start: The start date for making statements. :type date_start: str :param date_end: The end date for making statements. :type date_end: str :param regional_format: Required for CSV exports. The regional format of the statement, can be UK_US (comma-separated) or EUROPEAN (semicolon-separated). 
:type regional_format: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt """ if custom_headers is None: custom_headers = {} request_map = { cls.FIELD_STATEMENT_FORMAT: statement_format, cls.FIELD_DATE_START: date_start, cls.FIELD_DATE_END: date_end, cls.FIELD_REGIONAL_FORMAT: regional_format } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) api_client = client.ApiClient(cls._get_api_context()) request_bytes = request_map_string.encode() endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id(), cls._determine_monetary_account_id( monetary_account_id)) response_raw = api_client.post(endpoint_url, request_bytes, custom_headers) return BunqResponseInt.cast_from_bunq_response( cls._process_for_id(response_raw) )
def post(self, request, *args, **kwargs): """ Method for handling POST requests. Validates submitted form and formsets. Saves if valid, re displays page with errors if invalid. """ self.object = self.get_object() form_class = self.get_form_class() form = self.get_form(form_class) formsets = self.get_formsets(form, saving=True) valid_formsets = True for formset in formsets.values(): if not formset.is_valid(): valid_formsets = False break if self.is_valid(form, formsets): return self.form_valid(form, formsets) else: adminForm = self.get_admin_form(form) adminFormSets = self.get_admin_formsets(formsets) context = { 'adminForm': adminForm, 'formsets': adminFormSets, 'obj': self.object, } return self.form_invalid(form=form, **context)
Method for handling POST requests. Validates submitted form and formsets. Saves if valid, re displays page with errors if invalid.
Below is the the instruction that describes the task: ### Input: Method for handling POST requests. Validates submitted form and formsets. Saves if valid, re displays page with errors if invalid. ### Response: def post(self, request, *args, **kwargs): """ Method for handling POST requests. Validates submitted form and formsets. Saves if valid, re displays page with errors if invalid. """ self.object = self.get_object() form_class = self.get_form_class() form = self.get_form(form_class) formsets = self.get_formsets(form, saving=True) valid_formsets = True for formset in formsets.values(): if not formset.is_valid(): valid_formsets = False break if self.is_valid(form, formsets): return self.form_valid(form, formsets) else: adminForm = self.get_admin_form(form) adminFormSets = self.get_admin_formsets(formsets) context = { 'adminForm': adminForm, 'formsets': adminFormSets, 'obj': self.object, } return self.form_invalid(form=form, **context)
def file_add(backend, kitchen, recipe, message, filepath): """ Add a newly created file to a Recipe """ err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen) if use_kitchen is None: raise click.ClickException(err_str) if recipe is None: recipe = DKRecipeDisk.find_recipe_name() if recipe is None: raise click.ClickException('You must be in a recipe folder, or provide a recipe name.') click.secho('%s - Adding File (%s) to Recipe (%s) in kitchen(%s) with message (%s)' % (get_datetime(), filepath, recipe, use_kitchen, message), fg='green') check_and_print(DKCloudCommandRunner.add_file(backend.dki, use_kitchen, recipe, message, filepath))
Add a newly created file to a Recipe
Below is the the instruction that describes the task: ### Input: Add a newly created file to a Recipe ### Response: def file_add(backend, kitchen, recipe, message, filepath): """ Add a newly created file to a Recipe """ err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen) if use_kitchen is None: raise click.ClickException(err_str) if recipe is None: recipe = DKRecipeDisk.find_recipe_name() if recipe is None: raise click.ClickException('You must be in a recipe folder, or provide a recipe name.') click.secho('%s - Adding File (%s) to Recipe (%s) in kitchen(%s) with message (%s)' % (get_datetime(), filepath, recipe, use_kitchen, message), fg='green') check_and_print(DKCloudCommandRunner.add_file(backend.dki, use_kitchen, recipe, message, filepath))
def send_photo(self, chat_id, photo, caption=None, reply_to_message_id=None, reply_markup=None): """ Use this method to send photos. On success, the sent Message is returned. """ self.logger.info('send photo %s', photo) payload = dict(chat_id=chat_id, caption=caption, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup) files = dict(photo=open(photo, 'rb')) return Message.from_api(self, **self._post('sendPhoto', payload, files))
Use this method to send photos. On success, the sent Message is returned.
Below is the the instruction that describes the task: ### Input: Use this method to send photos. On success, the sent Message is returned. ### Response: def send_photo(self, chat_id, photo, caption=None, reply_to_message_id=None, reply_markup=None): """ Use this method to send photos. On success, the sent Message is returned. """ self.logger.info('send photo %s', photo) payload = dict(chat_id=chat_id, caption=caption, reply_to_message_id=reply_to_message_id, reply_markup=reply_markup) files = dict(photo=open(photo, 'rb')) return Message.from_api(self, **self._post('sendPhoto', payload, files))
def convert_to_str(l: Node) -> str: """ converts the non-negative number list into a string. """ result = "" while l: result += str(l.val) l = l.next return result
converts the non-negative number list into a string.
Below is the the instruction that describes the task: ### Input: converts the non-negative number list into a string. ### Response: def convert_to_str(l: Node) -> str: """ converts the non-negative number list into a string. """ result = "" while l: result += str(l.val) l = l.next return result
def download(self): """Download SRA files. Returns: :obj:`list` of :obj:`str`: List of downloaded files. """ self.downloaded_paths = list() for path in self.paths_for_download: downloaded_path = list() utils.mkdir_p(os.path.abspath(self.directory)) sra_run = path.split("/")[-1] logger.info("Analysing %s" % sra_run) url = type(self).FTP_ADDRESS_TPL.format( range_subdir=sra_run[:6], file_dir=sra_run) logger.debug("URL: %s", url) filepath = os.path.abspath( os.path.join(self.directory, "%s.sra" % sra_run)) utils.download_from_url( url, filepath, aspera=self.aspera, silent=self.silent, force=self.force) if self.filetype in ("fasta", "fastq"): if utils.which('fastq-dump') is None: logger.error("fastq-dump command not found") ftype = "" if self.filetype == "fasta": ftype = " --fasta " cmd = "fastq-dump" if utils.which('parallel-fastq-dump') is None: cmd += " %s --outdir %s %s" else: logger.debug("Using parallel fastq-dump") cmd = " parallel-fastq-dump --threads %s" cmd = cmd % self.threads cmd += " %s --outdir %s -s %s" cmd = cmd % (ftype, self.directory, filepath) for fqoption, fqvalue in iteritems(self.fastq_dump_options): if fqvalue: cmd += (" --%s %s" % (fqoption, fqvalue)) elif fqvalue is None: cmd += (" --%s" % fqoption) logger.debug(cmd) process = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) logger.info("Converting to %s/%s*.%s.gz\n" % ( self.directory, sra_run, self.filetype)) pout, perr = process.communicate() downloaded_path = glob.glob(os.path.join( self.directory, "%s*.%s.gz" % (sra_run, self.filetype))) elif self.filetype == 'sra': downloaded_path = glob.glob(os.path.join( self.directory, "%s*.%s" % (sra_run, self.filetype))) else: downloaded_path = glob.glob(os.path.join( self.directory, "%s*" % sra_run)) logger.error("Filetype %s not supported." % self.filetype) if not self.keep_sra and self.filetype != 'sra': # Delete sra file os.unlink(filepath) self.downloaded_paths += downloaded_path return self.downloaded_paths
Download SRA files. Returns: :obj:`list` of :obj:`str`: List of downloaded files.
Below is the the instruction that describes the task: ### Input: Download SRA files. Returns: :obj:`list` of :obj:`str`: List of downloaded files. ### Response: def download(self): """Download SRA files. Returns: :obj:`list` of :obj:`str`: List of downloaded files. """ self.downloaded_paths = list() for path in self.paths_for_download: downloaded_path = list() utils.mkdir_p(os.path.abspath(self.directory)) sra_run = path.split("/")[-1] logger.info("Analysing %s" % sra_run) url = type(self).FTP_ADDRESS_TPL.format( range_subdir=sra_run[:6], file_dir=sra_run) logger.debug("URL: %s", url) filepath = os.path.abspath( os.path.join(self.directory, "%s.sra" % sra_run)) utils.download_from_url( url, filepath, aspera=self.aspera, silent=self.silent, force=self.force) if self.filetype in ("fasta", "fastq"): if utils.which('fastq-dump') is None: logger.error("fastq-dump command not found") ftype = "" if self.filetype == "fasta": ftype = " --fasta " cmd = "fastq-dump" if utils.which('parallel-fastq-dump') is None: cmd += " %s --outdir %s %s" else: logger.debug("Using parallel fastq-dump") cmd = " parallel-fastq-dump --threads %s" cmd = cmd % self.threads cmd += " %s --outdir %s -s %s" cmd = cmd % (ftype, self.directory, filepath) for fqoption, fqvalue in iteritems(self.fastq_dump_options): if fqvalue: cmd += (" --%s %s" % (fqoption, fqvalue)) elif fqvalue is None: cmd += (" --%s" % fqoption) logger.debug(cmd) process = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) logger.info("Converting to %s/%s*.%s.gz\n" % ( self.directory, sra_run, self.filetype)) pout, perr = process.communicate() downloaded_path = glob.glob(os.path.join( self.directory, "%s*.%s.gz" % (sra_run, self.filetype))) elif self.filetype == 'sra': downloaded_path = glob.glob(os.path.join( self.directory, "%s*.%s" % (sra_run, self.filetype))) else: downloaded_path = glob.glob(os.path.join( self.directory, "%s*" % sra_run)) logger.error("Filetype %s not supported." 
% self.filetype) if not self.keep_sra and self.filetype != 'sra': # Delete sra file os.unlink(filepath) self.downloaded_paths += downloaded_path return self.downloaded_paths
def pad_to_size(data, shape, value=0.0): """ This is similar to `pad`, except you specify the final shape of the array. Parameters ---------- data : ndarray Numpy array of any dimension and type. shape : tuple Final shape of padded array. Should be tuple of length ``data.ndim``. If it has to pad unevenly, it will pad one more at the end of the axis than at the beginning. If a dimension is specified as ``-1``, then it will remain its current size along that dimension. value : data.dtype The value with which to pad. Default is ``0.0``. This can even be an array, as long as ``pdata[:] = value`` is valid, where ``pdata`` is the size of the padded array. Examples -------- >>> import deepdish as dd >>> import numpy as np Pad an array with zeros. >>> x = np.ones((4, 2)) >>> dd.util.pad_to_size(x, (5, 5)) array([[ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 0., 0., 0., 0.]]) """ shape = [data.shape[i] if shape[i] == -1 else shape[i] for i in range(len(shape))] new_data = np.empty(shape) new_data[:] = value II = [slice((shape[i] - data.shape[i])//2, (shape[i] - data.shape[i])//2 + data.shape[i]) for i in range(len(shape))] new_data[II] = data return new_data
This is similar to `pad`, except you specify the final shape of the array. Parameters ---------- data : ndarray Numpy array of any dimension and type. shape : tuple Final shape of padded array. Should be tuple of length ``data.ndim``. If it has to pad unevenly, it will pad one more at the end of the axis than at the beginning. If a dimension is specified as ``-1``, then it will remain its current size along that dimension. value : data.dtype The value with which to pad. Default is ``0.0``. This can even be an array, as long as ``pdata[:] = value`` is valid, where ``pdata`` is the size of the padded array. Examples -------- >>> import deepdish as dd >>> import numpy as np Pad an array with zeros. >>> x = np.ones((4, 2)) >>> dd.util.pad_to_size(x, (5, 5)) array([[ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 0., 0., 0., 0.]])
Below is the the instruction that describes the task: ### Input: This is similar to `pad`, except you specify the final shape of the array. Parameters ---------- data : ndarray Numpy array of any dimension and type. shape : tuple Final shape of padded array. Should be tuple of length ``data.ndim``. If it has to pad unevenly, it will pad one more at the end of the axis than at the beginning. If a dimension is specified as ``-1``, then it will remain its current size along that dimension. value : data.dtype The value with which to pad. Default is ``0.0``. This can even be an array, as long as ``pdata[:] = value`` is valid, where ``pdata`` is the size of the padded array. Examples -------- >>> import deepdish as dd >>> import numpy as np Pad an array with zeros. >>> x = np.ones((4, 2)) >>> dd.util.pad_to_size(x, (5, 5)) array([[ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 0., 0., 0., 0.]]) ### Response: def pad_to_size(data, shape, value=0.0): """ This is similar to `pad`, except you specify the final shape of the array. Parameters ---------- data : ndarray Numpy array of any dimension and type. shape : tuple Final shape of padded array. Should be tuple of length ``data.ndim``. If it has to pad unevenly, it will pad one more at the end of the axis than at the beginning. If a dimension is specified as ``-1``, then it will remain its current size along that dimension. value : data.dtype The value with which to pad. Default is ``0.0``. This can even be an array, as long as ``pdata[:] = value`` is valid, where ``pdata`` is the size of the padded array. Examples -------- >>> import deepdish as dd >>> import numpy as np Pad an array with zeros. 
>>> x = np.ones((4, 2)) >>> dd.util.pad_to_size(x, (5, 5)) array([[ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 0., 0., 0., 0.]]) """ shape = [data.shape[i] if shape[i] == -1 else shape[i] for i in range(len(shape))] new_data = np.empty(shape) new_data[:] = value II = [slice((shape[i] - data.shape[i])//2, (shape[i] - data.shape[i])//2 + data.shape[i]) for i in range(len(shape))] new_data[II] = data return new_data
def sym(self, nested_scope=None): """Return the correspond symbolic number.""" operation = self.children[0].operation() expr = self.children[1].sym(nested_scope) return operation(expr)
Return the correspond symbolic number.
Below is the the instruction that describes the task: ### Input: Return the correspond symbolic number. ### Response: def sym(self, nested_scope=None): """Return the correspond symbolic number.""" operation = self.children[0].operation() expr = self.children[1].sym(nested_scope) return operation(expr)
def get_running(): ''' Return a list of all running services, so far as systemd is concerned CLI Example: .. code-block:: bash salt '*' service.get_running ''' ret = set() # Get running systemd units out = __salt__['cmd.run']( _systemctl_cmd('--full --no-legend --no-pager'), python_shell=False, ignore_retcode=True) for line in salt.utils.itertools.split(out, '\n'): try: comps = line.strip().split() fullname = comps[0] if len(comps) > 3: active_state = comps[3] except ValueError as exc: log.error(exc) continue else: if active_state != 'running': continue try: unit_name, unit_type = fullname.rsplit('.', 1) except ValueError: continue if unit_type in VALID_UNIT_TYPES: ret.add(unit_name if unit_type == 'service' else fullname) return sorted(ret)
Return a list of all running services, so far as systemd is concerned CLI Example: .. code-block:: bash salt '*' service.get_running
Below is the the instruction that describes the task: ### Input: Return a list of all running services, so far as systemd is concerned CLI Example: .. code-block:: bash salt '*' service.get_running ### Response: def get_running(): ''' Return a list of all running services, so far as systemd is concerned CLI Example: .. code-block:: bash salt '*' service.get_running ''' ret = set() # Get running systemd units out = __salt__['cmd.run']( _systemctl_cmd('--full --no-legend --no-pager'), python_shell=False, ignore_retcode=True) for line in salt.utils.itertools.split(out, '\n'): try: comps = line.strip().split() fullname = comps[0] if len(comps) > 3: active_state = comps[3] except ValueError as exc: log.error(exc) continue else: if active_state != 'running': continue try: unit_name, unit_type = fullname.rsplit('.', 1) except ValueError: continue if unit_type in VALID_UNIT_TYPES: ret.add(unit_name if unit_type == 'service' else fullname) return sorted(ret)
def get_profile_form(): """ Returns the profile form defined by ``settings.ACCOUNTS_PROFILE_FORM_CLASS``. """ from yacms.conf import settings try: return import_dotted_path(settings.ACCOUNTS_PROFILE_FORM_CLASS) except ImportError: raise ImproperlyConfigured("Value for ACCOUNTS_PROFILE_FORM_CLASS " "could not be imported: %s" % settings.ACCOUNTS_PROFILE_FORM_CLASS)
Returns the profile form defined by ``settings.ACCOUNTS_PROFILE_FORM_CLASS``.
Below is the the instruction that describes the task: ### Input: Returns the profile form defined by ``settings.ACCOUNTS_PROFILE_FORM_CLASS``. ### Response: def get_profile_form(): """ Returns the profile form defined by ``settings.ACCOUNTS_PROFILE_FORM_CLASS``. """ from yacms.conf import settings try: return import_dotted_path(settings.ACCOUNTS_PROFILE_FORM_CLASS) except ImportError: raise ImproperlyConfigured("Value for ACCOUNTS_PROFILE_FORM_CLASS " "could not be imported: %s" % settings.ACCOUNTS_PROFILE_FORM_CLASS)
def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True
Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label
Below is the the instruction that describes the task: ### Input: Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label ### Response: def set_dhcp_only_all(interface): ''' Configure specified adapter to use DHCP only Change adapter mode to TCP/IP. If previous adapter mode was EtherCAT, the target will need reboot. :param str interface: interface label :return: True if the settings were applied, otherwise an exception will be thrown. :rtype: bool CLI Example: .. code-block:: bash salt '*' ip.dhcp_only_all interface-label ''' if not __grains__['lsb_distrib_id'] == 'nilrt': raise salt.exceptions.CommandExecutionError('Not supported in this version') initial_mode = _get_adapter_mode_info(interface) _save_config(interface, 'Mode', 'TCPIP') _save_config(interface, 'dhcpenabled', '1') _save_config(interface, 'linklocalenabled', '0') if initial_mode == 'ethercat': __salt__['system.set_reboot_required_witnessed']() else: _restart(interface) return True
def get_real_time_locate(ipAddress): """ function takes the ipAddress of a specific host and issues a RESTFUL call to get the device and interface that the target host is currently connected to. :param ipAddress: str value valid IPv4 IP address :return: dictionary containing hostIp, devId, deviceIP, ifDesc, ifIndex """ if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() real_time_locate_url = "/imcrs/res/access/realtimeLocate?type=2&value=" + str(ipAddress) + "&total=false" f_url = url + real_time_locate_url r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents if r.status_code == 200: return json.loads(r.text)['realtimeLocation'] else: print(r.status_code) print("An Error has occured")
function takes the ipAddress of a specific host and issues a RESTFUL call to get the device and interface that the target host is currently connected to. :param ipAddress: str value valid IPv4 IP address :return: dictionary containing hostIp, devId, deviceIP, ifDesc, ifIndex
Below is the the instruction that describes the task: ### Input: function takes the ipAddress of a specific host and issues a RESTFUL call to get the device and interface that the target host is currently connected to. :param ipAddress: str value valid IPv4 IP address :return: dictionary containing hostIp, devId, deviceIP, ifDesc, ifIndex ### Response: def get_real_time_locate(ipAddress): """ function takes the ipAddress of a specific host and issues a RESTFUL call to get the device and interface that the target host is currently connected to. :param ipAddress: str value valid IPv4 IP address :return: dictionary containing hostIp, devId, deviceIP, ifDesc, ifIndex """ if auth is None or url is None: # checks to see if the imc credentials are already available set_imc_creds() real_time_locate_url = "/imcrs/res/access/realtimeLocate?type=2&value=" + str(ipAddress) + "&total=false" f_url = url + real_time_locate_url r = requests.get(f_url, auth=auth, headers=headers) # creates the URL using the payload variable as the contents if r.status_code == 200: return json.loads(r.text)['realtimeLocation'] else: print(r.status_code) print("An Error has occured")
def change_directory(path=None): """ Context manager that changes directory and resets it when existing >>> with change_directory('/tmp'): >>> pass """ if path is not None: try: oldpwd = getcwd() logger.debug('changing directory from %s to %s' % (oldpwd, path)) chdir(path) yield finally: chdir(oldpwd) else: yield
Context manager that changes directory and resets it when existing >>> with change_directory('/tmp'): >>> pass
Below is the the instruction that describes the task: ### Input: Context manager that changes directory and resets it when existing >>> with change_directory('/tmp'): >>> pass ### Response: def change_directory(path=None): """ Context manager that changes directory and resets it when existing >>> with change_directory('/tmp'): >>> pass """ if path is not None: try: oldpwd = getcwd() logger.debug('changing directory from %s to %s' % (oldpwd, path)) chdir(path) yield finally: chdir(oldpwd) else: yield
def open_channel(self): """Open a new channel with RabbitMQ. When RabbitMQ responds that the channel is open, the on_channel_open callback will be invoked by pika. """ logger.debug('Creating new channel') self._connection.channel(on_open_callback=self.on_channel_open)
Open a new channel with RabbitMQ. When RabbitMQ responds that the channel is open, the on_channel_open callback will be invoked by pika.
Below is the the instruction that describes the task: ### Input: Open a new channel with RabbitMQ. When RabbitMQ responds that the channel is open, the on_channel_open callback will be invoked by pika. ### Response: def open_channel(self): """Open a new channel with RabbitMQ. When RabbitMQ responds that the channel is open, the on_channel_open callback will be invoked by pika. """ logger.debug('Creating new channel') self._connection.channel(on_open_callback=self.on_channel_open)
def select(self, selection_specs=None, **selection): """Applies selection by dimension name Applies a selection along the dimensions of the object using keyword arguments. The selection may be narrowed to certain objects using selection_specs. For container objects the selection will be applied to all children as well. Selections may select a specific value, slice or set of values: * value: Scalar values will select rows along with an exact match, e.g.: ds.select(x=3) * slice: Slices may be declared as tuples of the upper and lower bound, e.g.: ds.select(x=(0, 3)) * values: A list of values may be selected using a list or set, e.g.: ds.select(x=[0, 1, 2]) Args: selection_specs: List of specs to match on A list of types, functions, or type[.group][.label] strings specifying which objects to apply the selection on. **selection: Dictionary declaring selections by dimension Selections can be scalar values, tuple ranges, lists of discrete values and boolean arrays Returns: Returns an Dimensioned object containing the selected data or a scalar if a single value was selected """ if selection_specs is not None and not isinstance(selection_specs, (list, tuple)): selection_specs = [selection_specs] selection = {dim: sel for dim, sel in selection.items() if dim in self.dimensions()+['selection_mask']} if (selection_specs and not any(self.matches(sp) for sp in selection_specs) or not selection): return self data = self.interface.select(self, **selection) if np.isscalar(data): return data else: return self.clone(data)
Applies selection by dimension name Applies a selection along the dimensions of the object using keyword arguments. The selection may be narrowed to certain objects using selection_specs. For container objects the selection will be applied to all children as well. Selections may select a specific value, slice or set of values: * value: Scalar values will select rows along with an exact match, e.g.: ds.select(x=3) * slice: Slices may be declared as tuples of the upper and lower bound, e.g.: ds.select(x=(0, 3)) * values: A list of values may be selected using a list or set, e.g.: ds.select(x=[0, 1, 2]) Args: selection_specs: List of specs to match on A list of types, functions, or type[.group][.label] strings specifying which objects to apply the selection on. **selection: Dictionary declaring selections by dimension Selections can be scalar values, tuple ranges, lists of discrete values and boolean arrays Returns: Returns an Dimensioned object containing the selected data or a scalar if a single value was selected
Below is the the instruction that describes the task: ### Input: Applies selection by dimension name Applies a selection along the dimensions of the object using keyword arguments. The selection may be narrowed to certain objects using selection_specs. For container objects the selection will be applied to all children as well. Selections may select a specific value, slice or set of values: * value: Scalar values will select rows along with an exact match, e.g.: ds.select(x=3) * slice: Slices may be declared as tuples of the upper and lower bound, e.g.: ds.select(x=(0, 3)) * values: A list of values may be selected using a list or set, e.g.: ds.select(x=[0, 1, 2]) Args: selection_specs: List of specs to match on A list of types, functions, or type[.group][.label] strings specifying which objects to apply the selection on. **selection: Dictionary declaring selections by dimension Selections can be scalar values, tuple ranges, lists of discrete values and boolean arrays Returns: Returns an Dimensioned object containing the selected data or a scalar if a single value was selected ### Response: def select(self, selection_specs=None, **selection): """Applies selection by dimension name Applies a selection along the dimensions of the object using keyword arguments. The selection may be narrowed to certain objects using selection_specs. For container objects the selection will be applied to all children as well. Selections may select a specific value, slice or set of values: * value: Scalar values will select rows along with an exact match, e.g.: ds.select(x=3) * slice: Slices may be declared as tuples of the upper and lower bound, e.g.: ds.select(x=(0, 3)) * values: A list of values may be selected using a list or set, e.g.: ds.select(x=[0, 1, 2]) Args: selection_specs: List of specs to match on A list of types, functions, or type[.group][.label] strings specifying which objects to apply the selection on. 
**selection: Dictionary declaring selections by dimension Selections can be scalar values, tuple ranges, lists of discrete values and boolean arrays Returns: Returns an Dimensioned object containing the selected data or a scalar if a single value was selected """ if selection_specs is not None and not isinstance(selection_specs, (list, tuple)): selection_specs = [selection_specs] selection = {dim: sel for dim, sel in selection.items() if dim in self.dimensions()+['selection_mask']} if (selection_specs and not any(self.matches(sp) for sp in selection_specs) or not selection): return self data = self.interface.select(self, **selection) if np.isscalar(data): return data else: return self.clone(data)
def setxattr(self, req, ino, name, value, flags): """ Set an extended attribute Valid replies: reply_err """ self.reply_err(req, errno.ENOSYS)
Set an extended attribute Valid replies: reply_err
Below is the the instruction that describes the task: ### Input: Set an extended attribute Valid replies: reply_err ### Response: def setxattr(self, req, ino, name, value, flags): """ Set an extended attribute Valid replies: reply_err """ self.reply_err(req, errno.ENOSYS)
def tfhub_cache_dir(default_cache_dir=None, use_temp=False): """Returns cache directory. Returns cache directory from either TFHUB_CACHE_DIR environment variable or --tfhub_cache_dir or default, if set. Args: default_cache_dir: Default cache location to use if neither TFHUB_CACHE_DIR environment variable nor --tfhub_cache_dir are not specified. use_temp: bool, Optional to enable using system's temp directory as a module cache directory if neither default_cache_dir nor --tfhub_cache_dir nor TFHUB_CACHE_DIR environment variable are specified . """ # Note: We are using FLAGS["tfhub_cache_dir"] (and not FLAGS.tfhub_cache_dir) # to access the flag value in order to avoid parsing argv list. The flags # should have been parsed by now in main() by tf.app.run(). If that was not # the case (say in Colab env) we skip flag parsing because argv may contain # unknown flags. cache_dir = ( os.getenv(_TFHUB_CACHE_DIR, "") or FLAGS["tfhub_cache_dir"].value or default_cache_dir) if not cache_dir and use_temp: # Place all TF-Hub modules under <system's temp>/tfhub_modules. cache_dir = os.path.join(tempfile.gettempdir(), "tfhub_modules") if cache_dir: logging.log_first_n(logging.INFO, "Using %s to cache modules.", 1, cache_dir) return cache_dir
Returns cache directory. Returns cache directory from either TFHUB_CACHE_DIR environment variable or --tfhub_cache_dir or default, if set. Args: default_cache_dir: Default cache location to use if neither TFHUB_CACHE_DIR environment variable nor --tfhub_cache_dir are not specified. use_temp: bool, Optional to enable using system's temp directory as a module cache directory if neither default_cache_dir nor --tfhub_cache_dir nor TFHUB_CACHE_DIR environment variable are specified .
Below is the the instruction that describes the task: ### Input: Returns cache directory. Returns cache directory from either TFHUB_CACHE_DIR environment variable or --tfhub_cache_dir or default, if set. Args: default_cache_dir: Default cache location to use if neither TFHUB_CACHE_DIR environment variable nor --tfhub_cache_dir are not specified. use_temp: bool, Optional to enable using system's temp directory as a module cache directory if neither default_cache_dir nor --tfhub_cache_dir nor TFHUB_CACHE_DIR environment variable are specified . ### Response: def tfhub_cache_dir(default_cache_dir=None, use_temp=False): """Returns cache directory. Returns cache directory from either TFHUB_CACHE_DIR environment variable or --tfhub_cache_dir or default, if set. Args: default_cache_dir: Default cache location to use if neither TFHUB_CACHE_DIR environment variable nor --tfhub_cache_dir are not specified. use_temp: bool, Optional to enable using system's temp directory as a module cache directory if neither default_cache_dir nor --tfhub_cache_dir nor TFHUB_CACHE_DIR environment variable are specified . """ # Note: We are using FLAGS["tfhub_cache_dir"] (and not FLAGS.tfhub_cache_dir) # to access the flag value in order to avoid parsing argv list. The flags # should have been parsed by now in main() by tf.app.run(). If that was not # the case (say in Colab env) we skip flag parsing because argv may contain # unknown flags. cache_dir = ( os.getenv(_TFHUB_CACHE_DIR, "") or FLAGS["tfhub_cache_dir"].value or default_cache_dir) if not cache_dir and use_temp: # Place all TF-Hub modules under <system's temp>/tfhub_modules. cache_dir = os.path.join(tempfile.gettempdir(), "tfhub_modules") if cache_dir: logging.log_first_n(logging.INFO, "Using %s to cache modules.", 1, cache_dir) return cache_dir
def create_consumer(user, password, config_file_path, process_message, *args, **kwargs): '''Create a pulse consumer. Call listen() to start listening.''' queue_config = _read_json_file(filepath=config_file_path) exchanges = map(lambda x: queue_config['sources'][x]['exchange'], queue_config['sources'].keys()) topics = map(lambda x: queue_config['sources'][x]['topic'], queue_config['sources'].keys()) for i in range(0, len(exchanges)): LOG.info('Listening to (%s, %s)' % (exchanges[i], topics[i])) pulse_args = { # If the queue exists and is durable it should match 'durable': queue_config['durable'] in ('true', 'True'), 'password': password, 'topic': topics, 'user': user } if queue_config.get('applabel') is not None: pulse_args['applabel'] = queue_config['applabel'] return PulseReplayConsumer( # A list with the exchanges of each element under 'sources' exchanges=exchanges, callback=process_message, **pulse_args ) return consumer
Create a pulse consumer. Call listen() to start listening.
Below is the the instruction that describes the task: ### Input: Create a pulse consumer. Call listen() to start listening. ### Response: def create_consumer(user, password, config_file_path, process_message, *args, **kwargs): '''Create a pulse consumer. Call listen() to start listening.''' queue_config = _read_json_file(filepath=config_file_path) exchanges = map(lambda x: queue_config['sources'][x]['exchange'], queue_config['sources'].keys()) topics = map(lambda x: queue_config['sources'][x]['topic'], queue_config['sources'].keys()) for i in range(0, len(exchanges)): LOG.info('Listening to (%s, %s)' % (exchanges[i], topics[i])) pulse_args = { # If the queue exists and is durable it should match 'durable': queue_config['durable'] in ('true', 'True'), 'password': password, 'topic': topics, 'user': user } if queue_config.get('applabel') is not None: pulse_args['applabel'] = queue_config['applabel'] return PulseReplayConsumer( # A list with the exchanges of each element under 'sources' exchanges=exchanges, callback=process_message, **pulse_args ) return consumer
def deleteMapping(self, name, vpName, verbose=None): """ Deletes the Visual Property mapping specified by the `vpName` and `name` parameters. :param name: Name of the Visual Style containing the Visual Mapping :param vpName: Name of the Visual Property that the Visual Mapping controls :param verbose: print more :returns: default: successful operation """ response=api(url=self.___url+'styles/'+str(name)+'/mappings/'+str(vpName)+'', method="DELETE", verbose=verbose) return response
Deletes the Visual Property mapping specified by the `vpName` and `name` parameters. :param name: Name of the Visual Style containing the Visual Mapping :param vpName: Name of the Visual Property that the Visual Mapping controls :param verbose: print more :returns: default: successful operation
Below is the the instruction that describes the task: ### Input: Deletes the Visual Property mapping specified by the `vpName` and `name` parameters. :param name: Name of the Visual Style containing the Visual Mapping :param vpName: Name of the Visual Property that the Visual Mapping controls :param verbose: print more :returns: default: successful operation ### Response: def deleteMapping(self, name, vpName, verbose=None): """ Deletes the Visual Property mapping specified by the `vpName` and `name` parameters. :param name: Name of the Visual Style containing the Visual Mapping :param vpName: Name of the Visual Property that the Visual Mapping controls :param verbose: print more :returns: default: successful operation """ response=api(url=self.___url+'styles/'+str(name)+'/mappings/'+str(vpName)+'', method="DELETE", verbose=verbose) return response
def _do_update_packet(self, packet, ip, port): """ React to update packet - people/person on a device have changed :param packet: Packet from client with changes :type packet: paps.si.app.message.APPUpdateMessage :param ip: Client ip address :type ip: unicode :param port: Client port :type port: int :rtype: None """ self.debug("()") device_id = packet.header.device_id if device_id <= Id.SERVER: self.error("ProtocolViolation: Invalid device id") return client = self._clients.get(device_id, None) if not client: self.error("ProtocolViolation: Client is not registered") return key = u"{}:{}".format(ip, port) if client['key'] != key: self.error( u"ProtocolViolation: Client key ({}) has changed: {}".format( client['key'], key ) ) return # Packet info seems ok try: people = packet.people() except ProtocolViolation: self.exception("Failed to decode people from packet") return # Verify same number of people in update as registered to client # (APP specific) if len(people) != len(client['people']): self.error("ProtocolViolation: Incorrect number of people updated") changed = [] # Add ids to all people # Assumes same order here as on the client (e.g from the join()) for index, person in enumerate(people): old = client['people'][index] person.id = old.id if person != old: old.sitting = person.sitting # Maybe sent person to protect access to local saved state changed.append(old) if changed: # Only update if there is really a change try: self.changer.on_person_update(changed) except: self.exception("Failed to notify people update") return else: self.debug("No people updated")
React to update packet - people/person on a device have changed :param packet: Packet from client with changes :type packet: paps.si.app.message.APPUpdateMessage :param ip: Client ip address :type ip: unicode :param port: Client port :type port: int :rtype: None
Below is the the instruction that describes the task: ### Input: React to update packet - people/person on a device have changed :param packet: Packet from client with changes :type packet: paps.si.app.message.APPUpdateMessage :param ip: Client ip address :type ip: unicode :param port: Client port :type port: int :rtype: None ### Response: def _do_update_packet(self, packet, ip, port): """ React to update packet - people/person on a device have changed :param packet: Packet from client with changes :type packet: paps.si.app.message.APPUpdateMessage :param ip: Client ip address :type ip: unicode :param port: Client port :type port: int :rtype: None """ self.debug("()") device_id = packet.header.device_id if device_id <= Id.SERVER: self.error("ProtocolViolation: Invalid device id") return client = self._clients.get(device_id, None) if not client: self.error("ProtocolViolation: Client is not registered") return key = u"{}:{}".format(ip, port) if client['key'] != key: self.error( u"ProtocolViolation: Client key ({}) has changed: {}".format( client['key'], key ) ) return # Packet info seems ok try: people = packet.people() except ProtocolViolation: self.exception("Failed to decode people from packet") return # Verify same number of people in update as registered to client # (APP specific) if len(people) != len(client['people']): self.error("ProtocolViolation: Incorrect number of people updated") changed = [] # Add ids to all people # Assumes same order here as on the client (e.g from the join()) for index, person in enumerate(people): old = client['people'][index] person.id = old.id if person != old: old.sitting = person.sitting # Maybe sent person to protect access to local saved state changed.append(old) if changed: # Only update if there is really a change try: self.changer.on_person_update(changed) except: self.exception("Failed to notify people update") return else: self.debug("No people updated")
def _get_roles(self, username): """ Get roles of a user @str username: name of the user @rtype: dict, format { 'roles': [<list of roles>], 'unusedgroups': [<list of groups not matching roles>] } """ groups = self._get_groups(username) user_roles = self.roles.get_roles(groups) cherrypy.log.error( msg="user '" + username + "' roles: " + str(user_roles), severity=logging.DEBUG, ) return user_roles
Get roles of a user @str username: name of the user @rtype: dict, format { 'roles': [<list of roles>], 'unusedgroups': [<list of groups not matching roles>] }
Below is the the instruction that describes the task: ### Input: Get roles of a user @str username: name of the user @rtype: dict, format { 'roles': [<list of roles>], 'unusedgroups': [<list of groups not matching roles>] } ### Response: def _get_roles(self, username): """ Get roles of a user @str username: name of the user @rtype: dict, format { 'roles': [<list of roles>], 'unusedgroups': [<list of groups not matching roles>] } """ groups = self._get_groups(username) user_roles = self.roles.get_roles(groups) cherrypy.log.error( msg="user '" + username + "' roles: " + str(user_roles), severity=logging.DEBUG, ) return user_roles
def factory(cls, config, db): """ Given a configuration and database, select and return an appropriate instance of a subclass of GetBucketKey. This will ensure that both client and server support are available for the Lua script feature of Redis, and if not, a lock will be used. :param config: A dictionary of compactor options. :param db: A database handle for the Redis database. :returns: An instance of a subclass of GetBucketKey, dependent on the support for the Lua script feature of Redis. """ # Make sure that the client supports register_script() if not hasattr(db, 'register_script'): LOG.debug("Redis client does not support register_script()") return GetBucketKeyByLock(config, db) # OK, the client supports register_script(); what about the # server? info = db.info() if version_greater('2.6', info['redis_version']): LOG.debug("Redis server supports register_script()") return GetBucketKeyByScript(config, db) # OK, use our fallback... LOG.debug("Redis server does not support register_script()") return GetBucketKeyByLock(config, db)
Given a configuration and database, select and return an appropriate instance of a subclass of GetBucketKey. This will ensure that both client and server support are available for the Lua script feature of Redis, and if not, a lock will be used. :param config: A dictionary of compactor options. :param db: A database handle for the Redis database. :returns: An instance of a subclass of GetBucketKey, dependent on the support for the Lua script feature of Redis.
Below is the the instruction that describes the task: ### Input: Given a configuration and database, select and return an appropriate instance of a subclass of GetBucketKey. This will ensure that both client and server support are available for the Lua script feature of Redis, and if not, a lock will be used. :param config: A dictionary of compactor options. :param db: A database handle for the Redis database. :returns: An instance of a subclass of GetBucketKey, dependent on the support for the Lua script feature of Redis. ### Response: def factory(cls, config, db): """ Given a configuration and database, select and return an appropriate instance of a subclass of GetBucketKey. This will ensure that both client and server support are available for the Lua script feature of Redis, and if not, a lock will be used. :param config: A dictionary of compactor options. :param db: A database handle for the Redis database. :returns: An instance of a subclass of GetBucketKey, dependent on the support for the Lua script feature of Redis. """ # Make sure that the client supports register_script() if not hasattr(db, 'register_script'): LOG.debug("Redis client does not support register_script()") return GetBucketKeyByLock(config, db) # OK, the client supports register_script(); what about the # server? info = db.info() if version_greater('2.6', info['redis_version']): LOG.debug("Redis server supports register_script()") return GetBucketKeyByScript(config, db) # OK, use our fallback... LOG.debug("Redis server does not support register_script()") return GetBucketKeyByLock(config, db)
def tasks(self, from_date=DEFAULT_DATETIME): """Retrieve tasks. :param from_date: retrieve tasks that where updated from that date; dates are converted epoch time. """ # Convert 'from_date' to epoch timestamp. # Zero value (1970-01-01 00:00:00) is not allowed for # 'modifiedStart' so it will be set to 1, by default. ts = int(datetime_to_utc(from_date).timestamp()) or 1 consts = { self.PMODIFIED_START: ts } attachments = { self. PPROJECTS: True } params = { self.PCONSTRAINTS: consts, self.PATTACHMENTS: attachments, self.PORDER: self.VOUTDATED, } while True: r = self._call(self.MANIPHEST_TASKS, params) yield r j = json.loads(r) after = j['result']['cursor']['after'] if not after: break params[self.PAFTER] = after
Retrieve tasks. :param from_date: retrieve tasks that where updated from that date; dates are converted epoch time.
Below is the the instruction that describes the task: ### Input: Retrieve tasks. :param from_date: retrieve tasks that where updated from that date; dates are converted epoch time. ### Response: def tasks(self, from_date=DEFAULT_DATETIME): """Retrieve tasks. :param from_date: retrieve tasks that where updated from that date; dates are converted epoch time. """ # Convert 'from_date' to epoch timestamp. # Zero value (1970-01-01 00:00:00) is not allowed for # 'modifiedStart' so it will be set to 1, by default. ts = int(datetime_to_utc(from_date).timestamp()) or 1 consts = { self.PMODIFIED_START: ts } attachments = { self. PPROJECTS: True } params = { self.PCONSTRAINTS: consts, self.PATTACHMENTS: attachments, self.PORDER: self.VOUTDATED, } while True: r = self._call(self.MANIPHEST_TASKS, params) yield r j = json.loads(r) after = j['result']['cursor']['after'] if not after: break params[self.PAFTER] = after
def create(*units): """create this unit within the game as specified""" ret = [] for unit in units: # implemented using sc2simulator.ScenarioUnit x, y = unit.position[:2] pt = Point2D(x=x, y=y) unit.tag = 0 # forget any tag because a new unit will be created new = DebugCommand(create_unit=DebugCreateUnit( unit_type = unit.code, owner = unit.owner, pos = pt, quantity = 1, )) ret.append(new) return ret
create this unit within the game as specified
Below is the the instruction that describes the task: ### Input: create this unit within the game as specified ### Response: def create(*units): """create this unit within the game as specified""" ret = [] for unit in units: # implemented using sc2simulator.ScenarioUnit x, y = unit.position[:2] pt = Point2D(x=x, y=y) unit.tag = 0 # forget any tag because a new unit will be created new = DebugCommand(create_unit=DebugCreateUnit( unit_type = unit.code, owner = unit.owner, pos = pt, quantity = 1, )) ret.append(new) return ret
def which(x): ''' Same as which command on Linux ''' for p in os.environ.get('PATH').split(os.pathsep): p = os.path.join(p, x) if os.path.exists(p): return os.path.abspath(p) return None
Same as which command on Linux
Below is the the instruction that describes the task: ### Input: Same as which command on Linux ### Response: def which(x): ''' Same as which command on Linux ''' for p in os.environ.get('PATH').split(os.pathsep): p = os.path.join(p, x) if os.path.exists(p): return os.path.abspath(p) return None
def get_terrain_height(self, pos: Union[Point2, Point3, Unit]) -> int: """ Returns terrain height at a position. Caution: terrain height is not anywhere near a unit's z-coordinate. """ assert isinstance(pos, (Point2, Point3, Unit)) pos = pos.position.to2.rounded return self._game_info.terrain_height[pos]
Returns terrain height at a position. Caution: terrain height is not anywhere near a unit's z-coordinate.
Below is the the instruction that describes the task: ### Input: Returns terrain height at a position. Caution: terrain height is not anywhere near a unit's z-coordinate. ### Response: def get_terrain_height(self, pos: Union[Point2, Point3, Unit]) -> int: """ Returns terrain height at a position. Caution: terrain height is not anywhere near a unit's z-coordinate. """ assert isinstance(pos, (Point2, Point3, Unit)) pos = pos.position.to2.rounded return self._game_info.terrain_height[pos]
def add_ihex_file(self, filename, overwrite=False): """Open given Intel HEX file and add its records. Set `overwrite` to ``True`` to allow already added data to be overwritten. """ with open(filename, 'r') as fin: self.add_ihex(fin.read(), overwrite)
Open given Intel HEX file and add its records. Set `overwrite` to ``True`` to allow already added data to be overwritten.
Below is the the instruction that describes the task: ### Input: Open given Intel HEX file and add its records. Set `overwrite` to ``True`` to allow already added data to be overwritten. ### Response: def add_ihex_file(self, filename, overwrite=False): """Open given Intel HEX file and add its records. Set `overwrite` to ``True`` to allow already added data to be overwritten. """ with open(filename, 'r') as fin: self.add_ihex(fin.read(), overwrite)
def _get_edge_set(self, tol=0.1): """ Retrieve set of top edges from all of the individual surfaces, downsampling the upper edge based on the specified tolerance """ edges = [] for surface in self.surfaces: if isinstance(surface, GriddedSurface): return edges.append(surface.mesh) elif isinstance(surface, PlanarSurface): # Top edge determined from two end points edge = [] for pnt in [surface.top_left, surface.top_right]: edge.append([pnt.longitude, pnt.latitude, pnt.depth]) edges.append(numpy.array(edge)) elif isinstance(surface, (ComplexFaultSurface, SimpleFaultSurface)): # Rectangular meshes are downsampled to reduce their # overall size edges.append(downsample_trace(surface.mesh, tol)) else: raise ValueError("Surface %s not recognised" % str(surface)) return edges
Retrieve set of top edges from all of the individual surfaces, downsampling the upper edge based on the specified tolerance
Below is the the instruction that describes the task: ### Input: Retrieve set of top edges from all of the individual surfaces, downsampling the upper edge based on the specified tolerance ### Response: def _get_edge_set(self, tol=0.1): """ Retrieve set of top edges from all of the individual surfaces, downsampling the upper edge based on the specified tolerance """ edges = [] for surface in self.surfaces: if isinstance(surface, GriddedSurface): return edges.append(surface.mesh) elif isinstance(surface, PlanarSurface): # Top edge determined from two end points edge = [] for pnt in [surface.top_left, surface.top_right]: edge.append([pnt.longitude, pnt.latitude, pnt.depth]) edges.append(numpy.array(edge)) elif isinstance(surface, (ComplexFaultSurface, SimpleFaultSurface)): # Rectangular meshes are downsampled to reduce their # overall size edges.append(downsample_trace(surface.mesh, tol)) else: raise ValueError("Surface %s not recognised" % str(surface)) return edges
def boolean(self): """A mapping of this `StateVector` to a 2-D array containing all binary bits as booleans, for each time point. """ try: return self._boolean except AttributeError: nbits = len(self.bits) boolean = numpy.zeros((self.size, nbits), dtype=bool) for i, sample in enumerate(self.value): boolean[i, :] = [int(sample) >> j & 1 for j in range(nbits)] self._boolean = Array2D(boolean, name=self.name, x0=self.x0, dx=self.dx, y0=0, dy=1) return self.boolean
A mapping of this `StateVector` to a 2-D array containing all binary bits as booleans, for each time point.
Below is the the instruction that describes the task: ### Input: A mapping of this `StateVector` to a 2-D array containing all binary bits as booleans, for each time point. ### Response: def boolean(self): """A mapping of this `StateVector` to a 2-D array containing all binary bits as booleans, for each time point. """ try: return self._boolean except AttributeError: nbits = len(self.bits) boolean = numpy.zeros((self.size, nbits), dtype=bool) for i, sample in enumerate(self.value): boolean[i, :] = [int(sample) >> j & 1 for j in range(nbits)] self._boolean = Array2D(boolean, name=self.name, x0=self.x0, dx=self.dx, y0=0, dy=1) return self.boolean
def build_wxsfile_default_gui(root): """ This function adds a default GUI to the wxs file """ factory = Document() Product = root.getElementsByTagName('Product')[0] UIRef = factory.createElement('UIRef') UIRef.attributes['Id'] = 'WixUI_Mondo' Product.childNodes.append(UIRef) UIRef = factory.createElement('UIRef') UIRef.attributes['Id'] = 'WixUI_ErrorProgressText' Product.childNodes.append(UIRef)
This function adds a default GUI to the wxs file
Below is the the instruction that describes the task: ### Input: This function adds a default GUI to the wxs file ### Response: def build_wxsfile_default_gui(root): """ This function adds a default GUI to the wxs file """ factory = Document() Product = root.getElementsByTagName('Product')[0] UIRef = factory.createElement('UIRef') UIRef.attributes['Id'] = 'WixUI_Mondo' Product.childNodes.append(UIRef) UIRef = factory.createElement('UIRef') UIRef.attributes['Id'] = 'WixUI_ErrorProgressText' Product.childNodes.append(UIRef)
def from_cryptography(cls, crypto_req): """ Construct based on a ``cryptography`` *crypto_req*. :param crypto_req: A ``cryptography`` X.509 certificate signing request :type crypto_req: ``cryptography.x509.CertificateSigningRequest`` :rtype: X509Req .. versionadded:: 17.1.0 """ if not isinstance(crypto_req, x509.CertificateSigningRequest): raise TypeError("Must be a certificate signing request") req = cls() req._req = crypto_req._x509_req return req
Construct based on a ``cryptography`` *crypto_req*. :param crypto_req: A ``cryptography`` X.509 certificate signing request :type crypto_req: ``cryptography.x509.CertificateSigningRequest`` :rtype: X509Req .. versionadded:: 17.1.0
Below is the the instruction that describes the task: ### Input: Construct based on a ``cryptography`` *crypto_req*. :param crypto_req: A ``cryptography`` X.509 certificate signing request :type crypto_req: ``cryptography.x509.CertificateSigningRequest`` :rtype: X509Req .. versionadded:: 17.1.0 ### Response: def from_cryptography(cls, crypto_req): """ Construct based on a ``cryptography`` *crypto_req*. :param crypto_req: A ``cryptography`` X.509 certificate signing request :type crypto_req: ``cryptography.x509.CertificateSigningRequest`` :rtype: X509Req .. versionadded:: 17.1.0 """ if not isinstance(crypto_req, x509.CertificateSigningRequest): raise TypeError("Must be a certificate signing request") req = cls() req._req = crypto_req._x509_req return req
def find_gsc_offset(image, input_catalog='GSC1', output_catalog='GAIA'): """Find the GSC to GAIA offset based on guide star coordinates Parameters ---------- image : str Filename of image to be processed. Returns ------- delta_ra, delta_dec : tuple of floats Offset in decimal degrees of image based on correction to guide star coordinates relative to GAIA. """ serviceType = "GSCConvert/GSCconvert.aspx" spec_str = "TRANSFORM={}-{}&IPPPSSOOT={}" if 'rootname' in pf.getheader(image): ippssoot = pf.getval(image, 'rootname').upper() else: ippssoot = fu.buildNewRootname(image).upper() spec = spec_str.format(input_catalog, output_catalog, ippssoot) serviceUrl = "{}/{}?{}".format(SERVICELOCATION, serviceType, spec) rawcat = requests.get(serviceUrl) if not rawcat.ok: log.info("Problem accessing service with:\n{{}".format(serviceUrl)) raise ValueError delta_ra = delta_dec = None tree = BytesIO(rawcat.content) for _, element in etree.iterparse(tree): if element.tag == 'deltaRA': delta_ra = float(element.text) elif element.tag == 'deltaDEC': delta_dec = float(element.text) return delta_ra, delta_dec
Find the GSC to GAIA offset based on guide star coordinates Parameters ---------- image : str Filename of image to be processed. Returns ------- delta_ra, delta_dec : tuple of floats Offset in decimal degrees of image based on correction to guide star coordinates relative to GAIA.
Below is the the instruction that describes the task: ### Input: Find the GSC to GAIA offset based on guide star coordinates Parameters ---------- image : str Filename of image to be processed. Returns ------- delta_ra, delta_dec : tuple of floats Offset in decimal degrees of image based on correction to guide star coordinates relative to GAIA. ### Response: def find_gsc_offset(image, input_catalog='GSC1', output_catalog='GAIA'): """Find the GSC to GAIA offset based on guide star coordinates Parameters ---------- image : str Filename of image to be processed. Returns ------- delta_ra, delta_dec : tuple of floats Offset in decimal degrees of image based on correction to guide star coordinates relative to GAIA. """ serviceType = "GSCConvert/GSCconvert.aspx" spec_str = "TRANSFORM={}-{}&IPPPSSOOT={}" if 'rootname' in pf.getheader(image): ippssoot = pf.getval(image, 'rootname').upper() else: ippssoot = fu.buildNewRootname(image).upper() spec = spec_str.format(input_catalog, output_catalog, ippssoot) serviceUrl = "{}/{}?{}".format(SERVICELOCATION, serviceType, spec) rawcat = requests.get(serviceUrl) if not rawcat.ok: log.info("Problem accessing service with:\n{{}".format(serviceUrl)) raise ValueError delta_ra = delta_dec = None tree = BytesIO(rawcat.content) for _, element in etree.iterparse(tree): if element.tag == 'deltaRA': delta_ra = float(element.text) elif element.tag == 'deltaDEC': delta_dec = float(element.text) return delta_ra, delta_dec
def transform(self, X, perplexity=5, initialization="median", k=25, learning_rate=1, n_iter=100, exaggeration=2, momentum=0): """Embed new points into the existing embedding. This procedure optimizes each point only with respect to the existing embedding i.e. it ignores any interactions between the points in ``X`` among themselves. Please see the :ref:`parameter-guide` for more information. Parameters ---------- X: np.ndarray The data matrix to be added to the existing embedding. perplexity: float Perplexity can be thought of as the continuous :math:`k` number of nearest neighbors, for which t-SNE will attempt to preserve distances. However, when transforming, we only consider neighbors in the existing embedding i.e. each data point is placed into the embedding, independently of other new data points. initialization: Union[np.ndarray, str] The initial point positions to be used in the embedding space. Can be a precomputed numpy array, ``median``, ``weighted`` or ``random``. In all cases, ``median`` of ``weighted`` should be preferred. k: int The number of nearest neighbors to consider when initially placing the point onto the embedding. This is different from ``perpelxity`` because perplexity affects optimization while this only affects the initial point positions. learning_rate: float The learning rate for t-SNE optimization. Typical values range between 100 to 1000. Setting the learning rate too low or too high may result in the points forming a "ball". This is also known as the crowding problem. n_iter: int The number of iterations to run in the normal optimization regime. Typically, the number of iterations needed when adding new data points is much lower than with regular optimization. exaggeration: float The exaggeration factor to use during the normal optimization phase. This can be used to form more densely packed clusters and is useful for large data sets. momentum: float The momentum to use during optimization phase. 
Returns ------- PartialTSNEEmbedding The positions of the new points in the embedding space. """ # We check if the affinity `to_new` methods takes the `perplexity` # parameter and raise an informative error if not. This happes when the # user uses a non-standard affinity class e.g. multiscale, then attempts # to add points via `transform`. These classes take `perplexities` and # fail affinity_signature = inspect.signature(self.affinities.to_new) if "perplexity" not in affinity_signature.parameters: raise TypeError( "`transform` currently does not support non `%s` type affinity " "classes. Please use `prepare_partial` and `optimize` to add " "points to the embedding." % PerplexityBasedNN.__name__ ) embedding = self.prepare_partial( X, perplexity=perplexity, initialization=initialization, k=k ) try: embedding.optimize( n_iter=n_iter, learning_rate=learning_rate, exaggeration=exaggeration, momentum=momentum, inplace=True, propagate_exception=True, ) except OptimizationInterrupt as ex: log.info("Optimization was interrupted with callback.") embedding = ex.final_embedding return embedding
Embed new points into the existing embedding. This procedure optimizes each point only with respect to the existing embedding i.e. it ignores any interactions between the points in ``X`` among themselves. Please see the :ref:`parameter-guide` for more information. Parameters ---------- X: np.ndarray The data matrix to be added to the existing embedding. perplexity: float Perplexity can be thought of as the continuous :math:`k` number of nearest neighbors, for which t-SNE will attempt to preserve distances. However, when transforming, we only consider neighbors in the existing embedding i.e. each data point is placed into the embedding, independently of other new data points. initialization: Union[np.ndarray, str] The initial point positions to be used in the embedding space. Can be a precomputed numpy array, ``median``, ``weighted`` or ``random``. In all cases, ``median`` of ``weighted`` should be preferred. k: int The number of nearest neighbors to consider when initially placing the point onto the embedding. This is different from ``perpelxity`` because perplexity affects optimization while this only affects the initial point positions. learning_rate: float The learning rate for t-SNE optimization. Typical values range between 100 to 1000. Setting the learning rate too low or too high may result in the points forming a "ball". This is also known as the crowding problem. n_iter: int The number of iterations to run in the normal optimization regime. Typically, the number of iterations needed when adding new data points is much lower than with regular optimization. exaggeration: float The exaggeration factor to use during the normal optimization phase. This can be used to form more densely packed clusters and is useful for large data sets. momentum: float The momentum to use during optimization phase. Returns ------- PartialTSNEEmbedding The positions of the new points in the embedding space.
Below is the the instruction that describes the task: ### Input: Embed new points into the existing embedding. This procedure optimizes each point only with respect to the existing embedding i.e. it ignores any interactions between the points in ``X`` among themselves. Please see the :ref:`parameter-guide` for more information. Parameters ---------- X: np.ndarray The data matrix to be added to the existing embedding. perplexity: float Perplexity can be thought of as the continuous :math:`k` number of nearest neighbors, for which t-SNE will attempt to preserve distances. However, when transforming, we only consider neighbors in the existing embedding i.e. each data point is placed into the embedding, independently of other new data points. initialization: Union[np.ndarray, str] The initial point positions to be used in the embedding space. Can be a precomputed numpy array, ``median``, ``weighted`` or ``random``. In all cases, ``median`` of ``weighted`` should be preferred. k: int The number of nearest neighbors to consider when initially placing the point onto the embedding. This is different from ``perpelxity`` because perplexity affects optimization while this only affects the initial point positions. learning_rate: float The learning rate for t-SNE optimization. Typical values range between 100 to 1000. Setting the learning rate too low or too high may result in the points forming a "ball". This is also known as the crowding problem. n_iter: int The number of iterations to run in the normal optimization regime. Typically, the number of iterations needed when adding new data points is much lower than with regular optimization. exaggeration: float The exaggeration factor to use during the normal optimization phase. This can be used to form more densely packed clusters and is useful for large data sets. momentum: float The momentum to use during optimization phase. Returns ------- PartialTSNEEmbedding The positions of the new points in the embedding space. 
### Response: def transform(self, X, perplexity=5, initialization="median", k=25, learning_rate=1, n_iter=100, exaggeration=2, momentum=0): """Embed new points into the existing embedding. This procedure optimizes each point only with respect to the existing embedding i.e. it ignores any interactions between the points in ``X`` among themselves. Please see the :ref:`parameter-guide` for more information. Parameters ---------- X: np.ndarray The data matrix to be added to the existing embedding. perplexity: float Perplexity can be thought of as the continuous :math:`k` number of nearest neighbors, for which t-SNE will attempt to preserve distances. However, when transforming, we only consider neighbors in the existing embedding i.e. each data point is placed into the embedding, independently of other new data points. initialization: Union[np.ndarray, str] The initial point positions to be used in the embedding space. Can be a precomputed numpy array, ``median``, ``weighted`` or ``random``. In all cases, ``median`` of ``weighted`` should be preferred. k: int The number of nearest neighbors to consider when initially placing the point onto the embedding. This is different from ``perpelxity`` because perplexity affects optimization while this only affects the initial point positions. learning_rate: float The learning rate for t-SNE optimization. Typical values range between 100 to 1000. Setting the learning rate too low or too high may result in the points forming a "ball". This is also known as the crowding problem. n_iter: int The number of iterations to run in the normal optimization regime. Typically, the number of iterations needed when adding new data points is much lower than with regular optimization. exaggeration: float The exaggeration factor to use during the normal optimization phase. This can be used to form more densely packed clusters and is useful for large data sets. momentum: float The momentum to use during optimization phase. 
Returns ------- PartialTSNEEmbedding The positions of the new points in the embedding space. """ # We check if the affinity `to_new` methods takes the `perplexity` # parameter and raise an informative error if not. This happes when the # user uses a non-standard affinity class e.g. multiscale, then attempts # to add points via `transform`. These classes take `perplexities` and # fail affinity_signature = inspect.signature(self.affinities.to_new) if "perplexity" not in affinity_signature.parameters: raise TypeError( "`transform` currently does not support non `%s` type affinity " "classes. Please use `prepare_partial` and `optimize` to add " "points to the embedding." % PerplexityBasedNN.__name__ ) embedding = self.prepare_partial( X, perplexity=perplexity, initialization=initialization, k=k ) try: embedding.optimize( n_iter=n_iter, learning_rate=learning_rate, exaggeration=exaggeration, momentum=momentum, inplace=True, propagate_exception=True, ) except OptimizationInterrupt as ex: log.info("Optimization was interrupted with callback.") embedding = ex.final_embedding return embedding
def process_answer(self, user, item, asked, answered, time, answer, response_time, guess, **kwargs): """ This method is used during the answer streaming and is called after the predictive model for each answer. Args: user (int): identifier of ther user answering the question asked (int): identifier of the asked item answered (int): identifier of the answered item or None if the user answered "I don't know" response_time (int) time the answer took in milliseconds time (datetime.datetime) time when the user answered the question guess (float): probability of correct response in case of random answer """ pass
This method is used during the answer streaming and is called after the predictive model for each answer. Args: user (int): identifier of ther user answering the question asked (int): identifier of the asked item answered (int): identifier of the answered item or None if the user answered "I don't know" response_time (int) time the answer took in milliseconds time (datetime.datetime) time when the user answered the question guess (float): probability of correct response in case of random answer
Below is the the instruction that describes the task: ### Input: This method is used during the answer streaming and is called after the predictive model for each answer. Args: user (int): identifier of ther user answering the question asked (int): identifier of the asked item answered (int): identifier of the answered item or None if the user answered "I don't know" response_time (int) time the answer took in milliseconds time (datetime.datetime) time when the user answered the question guess (float): probability of correct response in case of random answer ### Response: def process_answer(self, user, item, asked, answered, time, answer, response_time, guess, **kwargs): """ This method is used during the answer streaming and is called after the predictive model for each answer. Args: user (int): identifier of ther user answering the question asked (int): identifier of the asked item answered (int): identifier of the answered item or None if the user answered "I don't know" response_time (int) time the answer took in milliseconds time (datetime.datetime) time when the user answered the question guess (float): probability of correct response in case of random answer """ pass
def link(self, x,y,w,h,link): "Put a link on the page" if not self.page in self.page_links: self.page_links[self.page] = [] self.page_links[self.page] += [(x*self.k,self.h_pt-y*self.k,w*self.k,h*self.k,link),]
Put a link on the page
Below is the the instruction that describes the task: ### Input: Put a link on the page ### Response: def link(self, x,y,w,h,link): "Put a link on the page" if not self.page in self.page_links: self.page_links[self.page] = [] self.page_links[self.page] += [(x*self.k,self.h_pt-y*self.k,w*self.k,h*self.k,link),]
def p_Callable(p): ''' Callable : NsContentName LPARENT | NsContentName SCOPEOP INDENTIFIER LPARENT | Expression LPARENT | STATIC SCOPEOP INDENTIFIER LPARENT ''' if len(p) <= 3: p[0] = Callable(p[1], None) else: p[0] = Callable(p[1], p[3])
Callable : NsContentName LPARENT | NsContentName SCOPEOP INDENTIFIER LPARENT | Expression LPARENT | STATIC SCOPEOP INDENTIFIER LPARENT
Below is the the instruction that describes the task: ### Input: Callable : NsContentName LPARENT | NsContentName SCOPEOP INDENTIFIER LPARENT | Expression LPARENT | STATIC SCOPEOP INDENTIFIER LPARENT ### Response: def p_Callable(p): ''' Callable : NsContentName LPARENT | NsContentName SCOPEOP INDENTIFIER LPARENT | Expression LPARENT | STATIC SCOPEOP INDENTIFIER LPARENT ''' if len(p) <= 3: p[0] = Callable(p[1], None) else: p[0] = Callable(p[1], p[3])
def _evalFunction(individual, name_values, X, y, scorer, cv, iid, fit_params, verbose=0, error_score='raise', score_cache={}): """ Developer Note: -------------------- score_cache was purposefully moved to parameters, and given a dict reference. It will be modified in-place by _evalFunction based on it's reference. This is to allow for a managed, paralell memoization dict, and also for different memoization per instance of EvolutionaryAlgorithmSearchCV. Remember that dicts created inside function definitions are presistent between calls, So unless it is replaced this function will be memoized each call automatically. """ parameters = _individual_to_params(individual, name_values) score = 0 n_test = 0 paramkey = str(individual) if paramkey in score_cache: score = score_cache[paramkey] else: for train, test in cv.split(X, y): assert len(train) > 0 and len(test) > 0, "Training and/or testing not long enough for evaluation." _score = _fit_and_score(estimator=individual.est, X=X, y=y, scorer=scorer, train=train, test=test, verbose=verbose, parameters=parameters, fit_params=fit_params, error_score=error_score)[0] if iid: score += _score * len(test) n_test += len(test) else: score += _score n_test += 1 assert n_test > 0, "No fitting was accomplished, check data and cross validation method." score /= float(n_test) score_cache[paramkey] = score return (score,)
Developer Note: -------------------- score_cache was purposefully moved to parameters, and given a dict reference. It will be modified in-place by _evalFunction based on it's reference. This is to allow for a managed, paralell memoization dict, and also for different memoization per instance of EvolutionaryAlgorithmSearchCV. Remember that dicts created inside function definitions are presistent between calls, So unless it is replaced this function will be memoized each call automatically.
Below is the the instruction that describes the task: ### Input: Developer Note: -------------------- score_cache was purposefully moved to parameters, and given a dict reference. It will be modified in-place by _evalFunction based on it's reference. This is to allow for a managed, paralell memoization dict, and also for different memoization per instance of EvolutionaryAlgorithmSearchCV. Remember that dicts created inside function definitions are presistent between calls, So unless it is replaced this function will be memoized each call automatically. ### Response: def _evalFunction(individual, name_values, X, y, scorer, cv, iid, fit_params, verbose=0, error_score='raise', score_cache={}): """ Developer Note: -------------------- score_cache was purposefully moved to parameters, and given a dict reference. It will be modified in-place by _evalFunction based on it's reference. This is to allow for a managed, paralell memoization dict, and also for different memoization per instance of EvolutionaryAlgorithmSearchCV. Remember that dicts created inside function definitions are presistent between calls, So unless it is replaced this function will be memoized each call automatically. """ parameters = _individual_to_params(individual, name_values) score = 0 n_test = 0 paramkey = str(individual) if paramkey in score_cache: score = score_cache[paramkey] else: for train, test in cv.split(X, y): assert len(train) > 0 and len(test) > 0, "Training and/or testing not long enough for evaluation." _score = _fit_and_score(estimator=individual.est, X=X, y=y, scorer=scorer, train=train, test=test, verbose=verbose, parameters=parameters, fit_params=fit_params, error_score=error_score)[0] if iid: score += _score * len(test) n_test += len(test) else: score += _score n_test += 1 assert n_test > 0, "No fitting was accomplished, check data and cross validation method." score /= float(n_test) score_cache[paramkey] = score return (score,)
def unregister_provider(self, provider): """ Unregister a provider. Blocks until this RpcConsumer is unregistered from its QueueConsumer, which only happens when all providers have asked to unregister. """ self._unregistering_providers.add(provider) remaining_providers = self._providers - self._unregistering_providers if not remaining_providers: _log.debug('unregistering from queueconsumer %s', self) self.queue_consumer.unregister_provider(self) _log.debug('unregistered from queueconsumer %s', self) self._unregistered_from_queue_consumer.send(True) _log.debug('waiting for unregister from queue consumer %s', self) self._unregistered_from_queue_consumer.wait() super(RpcConsumer, self).unregister_provider(provider)
Unregister a provider. Blocks until this RpcConsumer is unregistered from its QueueConsumer, which only happens when all providers have asked to unregister.
Below is the the instruction that describes the task: ### Input: Unregister a provider. Blocks until this RpcConsumer is unregistered from its QueueConsumer, which only happens when all providers have asked to unregister. ### Response: def unregister_provider(self, provider): """ Unregister a provider. Blocks until this RpcConsumer is unregistered from its QueueConsumer, which only happens when all providers have asked to unregister. """ self._unregistering_providers.add(provider) remaining_providers = self._providers - self._unregistering_providers if not remaining_providers: _log.debug('unregistering from queueconsumer %s', self) self.queue_consumer.unregister_provider(self) _log.debug('unregistered from queueconsumer %s', self) self._unregistered_from_queue_consumer.send(True) _log.debug('waiting for unregister from queue consumer %s', self) self._unregistered_from_queue_consumer.wait() super(RpcConsumer, self).unregister_provider(provider)
def get_header(results): '''Extracts the headers, using the first value in the dict as the template''' ret = ['name', ] values = next(iter(results.values())) for k, v in values.items(): if isinstance(v, dict): for metric in v.keys(): ret.append('%s:%s' % (k, metric)) else: ret.append(k) return ret
Extracts the headers, using the first value in the dict as the template
Below is the the instruction that describes the task: ### Input: Extracts the headers, using the first value in the dict as the template ### Response: def get_header(results): '''Extracts the headers, using the first value in the dict as the template''' ret = ['name', ] values = next(iter(results.values())) for k, v in values.items(): if isinstance(v, dict): for metric in v.keys(): ret.append('%s:%s' % (k, metric)) else: ret.append(k) return ret
def sse_event(event=None, data=None, id=None, retry=None, comment=None, encoding='utf-8'): """Encode a Server-Sent Event (SSE). At least one field must be present. All fields are strings, except retry, which must be an integer. The event and id fields can not contain newlines. """ if all(x is None for x in [event, data, id, retry, comment]): raise TypeError("Event must have at least one field") if event and any(c in event for c in '\r\n'): raise ValueError("'event' can not contain newlines: '%s'" % event) if id and any(c in id for c in '\r\n'): raise ValueError("'id' can not contain newlines: '%s'" % id) return ''.join([ _sse_encode('', comment) if comment is not None else '', _sse_encode('id', id) if id is not None else '', _sse_encode('event', event) if event is not None else '', _sse_encode('retry', str(int(retry))) if retry is not None else '', _sse_encode('data', data) if data is not None else '', '\n', ]).encode(encoding)
Encode a Server-Sent Event (SSE). At least one field must be present. All fields are strings, except retry, which must be an integer. The event and id fields can not contain newlines.
Below is the the instruction that describes the task: ### Input: Encode a Server-Sent Event (SSE). At least one field must be present. All fields are strings, except retry, which must be an integer. The event and id fields can not contain newlines. ### Response: def sse_event(event=None, data=None, id=None, retry=None, comment=None, encoding='utf-8'): """Encode a Server-Sent Event (SSE). At least one field must be present. All fields are strings, except retry, which must be an integer. The event and id fields can not contain newlines. """ if all(x is None for x in [event, data, id, retry, comment]): raise TypeError("Event must have at least one field") if event and any(c in event for c in '\r\n'): raise ValueError("'event' can not contain newlines: '%s'" % event) if id and any(c in id for c in '\r\n'): raise ValueError("'id' can not contain newlines: '%s'" % id) return ''.join([ _sse_encode('', comment) if comment is not None else '', _sse_encode('id', id) if id is not None else '', _sse_encode('event', event) if event is not None else '', _sse_encode('retry', str(int(retry))) if retry is not None else '', _sse_encode('data', data) if data is not None else '', '\n', ]).encode(encoding)
def raise_error(name=None, args=None, message=''): ''' Raise an exception with __name__ from name, args from args If args is None Otherwise message from message\ If name is empty then use "Exception" ''' name = name or 'Exception' if hasattr(salt.exceptions, name): ex = getattr(salt.exceptions, name) elif hasattr(exceptions, name): ex = getattr(exceptions, name) else: name = 'SaltException' ex = getattr(salt.exceptions, name) if args is not None: raise ex(*args) else: raise ex(message)
Raise an exception with __name__ from name, args from args If args is None Otherwise message from message\ If name is empty then use "Exception"
Below is the the instruction that describes the task: ### Input: Raise an exception with __name__ from name, args from args If args is None Otherwise message from message\ If name is empty then use "Exception" ### Response: def raise_error(name=None, args=None, message=''): ''' Raise an exception with __name__ from name, args from args If args is None Otherwise message from message\ If name is empty then use "Exception" ''' name = name or 'Exception' if hasattr(salt.exceptions, name): ex = getattr(salt.exceptions, name) elif hasattr(exceptions, name): ex = getattr(exceptions, name) else: name = 'SaltException' ex = getattr(salt.exceptions, name) if args is not None: raise ex(*args) else: raise ex(message)
def detect_change_mode(text,change): "returns 'add' 'delete' or 'internal'. see comments to update_changes for more details." # warning: some wacky diff logic (in python, probably in JS too) is making some adds / deletes look like replacements (see notes at bottom of diff.py) if len(change.deltas)>1: return 'internal' # todo below: why are blank deltas getting sent? is it a new seg thing? I'm picking 'add' because it has to be in some category and add is most likely next if this is a new seg. elif not change.deltas: return 'add' delta,=change.deltas # intentional crash if len(deltas)!=1 if delta.a==delta.b and delta.a==len(text): return 'add' elif delta.b==len(text) and len(delta.text)==0: return 'delete' else: return 'internal'
returns 'add' 'delete' or 'internal'. see comments to update_changes for more details.
Below is the the instruction that describes the task: ### Input: returns 'add' 'delete' or 'internal'. see comments to update_changes for more details. ### Response: def detect_change_mode(text,change): "returns 'add' 'delete' or 'internal'. see comments to update_changes for more details." # warning: some wacky diff logic (in python, probably in JS too) is making some adds / deletes look like replacements (see notes at bottom of diff.py) if len(change.deltas)>1: return 'internal' # todo below: why are blank deltas getting sent? is it a new seg thing? I'm picking 'add' because it has to be in some category and add is most likely next if this is a new seg. elif not change.deltas: return 'add' delta,=change.deltas # intentional crash if len(deltas)!=1 if delta.a==delta.b and delta.a==len(text): return 'add' elif delta.b==len(text) and len(delta.text)==0: return 'delete' else: return 'internal'
def list_cmd(only_active, only_aliases, verbose): """List indices.""" def _tree_print(d, rec_list=None, verbose=False, indent=2): # Note that on every recursion rec_list is copied, # which might not be very effective for very deep dictionaries. rec_list = rec_list or [] for idx, key in enumerate(sorted(d)): line = (['│' + ' ' * indent if i == 1 else ' ' * (indent+1) for i in rec_list]) line.append('└──' if len(d)-1 == idx else '├──') click.echo(''.join(line), nl=False) if isinstance(d[key], dict): click.echo(key) new_rec_list = rec_list + [0 if len(d)-1 == idx else 1] _tree_print(d[key], new_rec_list, verbose) else: leaf_txt = '{} -> {}'.format(key, d[key]) if verbose else key click.echo(leaf_txt) aliases = (current_search.active_aliases if only_active else current_search.aliases) active_aliases = current_search.active_aliases if only_aliases: click.echo(json.dumps(list((aliases.keys())), indent=4)) else: # Mark active indices for printout aliases = {(k + (' *' if k in active_aliases else '')): v for k, v in aliases.items()} click.echo(_tree_print(aliases, verbose=verbose))
List indices.
Below is the the instruction that describes the task: ### Input: List indices. ### Response: def list_cmd(only_active, only_aliases, verbose): """List indices.""" def _tree_print(d, rec_list=None, verbose=False, indent=2): # Note that on every recursion rec_list is copied, # which might not be very effective for very deep dictionaries. rec_list = rec_list or [] for idx, key in enumerate(sorted(d)): line = (['│' + ' ' * indent if i == 1 else ' ' * (indent+1) for i in rec_list]) line.append('└──' if len(d)-1 == idx else '├──') click.echo(''.join(line), nl=False) if isinstance(d[key], dict): click.echo(key) new_rec_list = rec_list + [0 if len(d)-1 == idx else 1] _tree_print(d[key], new_rec_list, verbose) else: leaf_txt = '{} -> {}'.format(key, d[key]) if verbose else key click.echo(leaf_txt) aliases = (current_search.active_aliases if only_active else current_search.aliases) active_aliases = current_search.active_aliases if only_aliases: click.echo(json.dumps(list((aliases.keys())), indent=4)) else: # Mark active indices for printout aliases = {(k + (' *' if k in active_aliases else '')): v for k, v in aliases.items()} click.echo(_tree_print(aliases, verbose=verbose))
def execute(commands, serial=None): """ Sends the command to the connected micro:bit via serial and returns the result. If no serial connection is provided, attempts to autodetect the device. For this to work correctly, a particular sequence of commands needs to be sent to put the device into a good state to process the incoming command. Returns the stdout and stderr output from the micro:bit. """ close_serial = False if serial is None: serial = get_serial() close_serial = True time.sleep(0.1) result = b'' raw_on(serial) time.sleep(0.1) # Write the actual command and send CTRL-D to evaluate. for command in commands: command_bytes = command.encode('utf-8') for i in range(0, len(command_bytes), 32): serial.write(command_bytes[i:min(i + 32, len(command_bytes))]) time.sleep(0.01) serial.write(b'\x04') response = serial.read_until(b'\x04>') # Read until prompt. out, err = response[2:-2].split(b'\x04', 1) # Split stdout, stderr result += out if err: return b'', err time.sleep(0.1) raw_off(serial) if close_serial: serial.close() time.sleep(0.1) return result, err
Sends the command to the connected micro:bit via serial and returns the result. If no serial connection is provided, attempts to autodetect the device. For this to work correctly, a particular sequence of commands needs to be sent to put the device into a good state to process the incoming command. Returns the stdout and stderr output from the micro:bit.
Below is the the instruction that describes the task: ### Input: Sends the command to the connected micro:bit via serial and returns the result. If no serial connection is provided, attempts to autodetect the device. For this to work correctly, a particular sequence of commands needs to be sent to put the device into a good state to process the incoming command. Returns the stdout and stderr output from the micro:bit. ### Response: def execute(commands, serial=None): """ Sends the command to the connected micro:bit via serial and returns the result. If no serial connection is provided, attempts to autodetect the device. For this to work correctly, a particular sequence of commands needs to be sent to put the device into a good state to process the incoming command. Returns the stdout and stderr output from the micro:bit. """ close_serial = False if serial is None: serial = get_serial() close_serial = True time.sleep(0.1) result = b'' raw_on(serial) time.sleep(0.1) # Write the actual command and send CTRL-D to evaluate. for command in commands: command_bytes = command.encode('utf-8') for i in range(0, len(command_bytes), 32): serial.write(command_bytes[i:min(i + 32, len(command_bytes))]) time.sleep(0.01) serial.write(b'\x04') response = serial.read_until(b'\x04>') # Read until prompt. out, err = response[2:-2].split(b'\x04', 1) # Split stdout, stderr result += out if err: return b'', err time.sleep(0.1) raw_off(serial) if close_serial: serial.close() time.sleep(0.1) return result, err
def pad_rem(pv, size=None): """ Removes linear padding from array This is a convenience function that does the opposite of `pad_add`. Parameters ---------- pv : 1D or 2D ndarray The array from which the padding will be removed. size : tuple of length 1 (1D) or 2 (2D), optional The final size of the un-padded array. Defaults to half the size of the input array. Returns ------- pv : 1D or 2D ndarray Padded array `av` with pads appended to right and bottom. """ if size is None: size = list() for s in pv.shape: assert s % 2 == 0, "Uneven size; specify correct size of output!" size.append(int(s/2)) elif not hasattr(size, "__len__"): size = [size] assert len(pv.shape) in [1, 2], "Only 1D and 2D arrays!" assert len(pv.shape) == len( size), "`size` must have same length as `av.shape`!" if len(pv.shape) == 2: return pv[:size[0], :size[1]] else: return pv[:size[0]]
Removes linear padding from array This is a convenience function that does the opposite of `pad_add`. Parameters ---------- pv : 1D or 2D ndarray The array from which the padding will be removed. size : tuple of length 1 (1D) or 2 (2D), optional The final size of the un-padded array. Defaults to half the size of the input array. Returns ------- pv : 1D or 2D ndarray Padded array `av` with pads appended to right and bottom.
Below is the the instruction that describes the task: ### Input: Removes linear padding from array This is a convenience function that does the opposite of `pad_add`. Parameters ---------- pv : 1D or 2D ndarray The array from which the padding will be removed. size : tuple of length 1 (1D) or 2 (2D), optional The final size of the un-padded array. Defaults to half the size of the input array. Returns ------- pv : 1D or 2D ndarray Padded array `av` with pads appended to right and bottom. ### Response: def pad_rem(pv, size=None): """ Removes linear padding from array This is a convenience function that does the opposite of `pad_add`. Parameters ---------- pv : 1D or 2D ndarray The array from which the padding will be removed. size : tuple of length 1 (1D) or 2 (2D), optional The final size of the un-padded array. Defaults to half the size of the input array. Returns ------- pv : 1D or 2D ndarray Padded array `av` with pads appended to right and bottom. """ if size is None: size = list() for s in pv.shape: assert s % 2 == 0, "Uneven size; specify correct size of output!" size.append(int(s/2)) elif not hasattr(size, "__len__"): size = [size] assert len(pv.shape) in [1, 2], "Only 1D and 2D arrays!" assert len(pv.shape) == len( size), "`size` must have same length as `av.shape`!" if len(pv.shape) == 2: return pv[:size[0], :size[1]] else: return pv[:size[0]]
def getISOSetupList(self): """ Get individual ISO transfer's setup. Returns a list of dicts, each containing an individual ISO transfer parameters: - length - actual_length - status (see libusb1's API documentation for their signification) Returned list is consistent with getISOBufferList return value. Should not be called on a submitted transfer (except for 'length' values). """ transfer_p = self.__transfer transfer = transfer_p.contents # pylint: disable=undefined-variable if transfer.type != TRANSFER_TYPE_ISOCHRONOUS: # pylint: enable=undefined-variable raise TypeError( 'This method cannot be called on non-iso transfers.' ) return [ { 'length': x.length, 'actual_length': x.actual_length, 'status': x.status, } for x in libusb1.get_iso_packet_list(transfer_p) ]
Get individual ISO transfer's setup. Returns a list of dicts, each containing an individual ISO transfer parameters: - length - actual_length - status (see libusb1's API documentation for their signification) Returned list is consistent with getISOBufferList return value. Should not be called on a submitted transfer (except for 'length' values).
Below is the the instruction that describes the task: ### Input: Get individual ISO transfer's setup. Returns a list of dicts, each containing an individual ISO transfer parameters: - length - actual_length - status (see libusb1's API documentation for their signification) Returned list is consistent with getISOBufferList return value. Should not be called on a submitted transfer (except for 'length' values). ### Response: def getISOSetupList(self): """ Get individual ISO transfer's setup. Returns a list of dicts, each containing an individual ISO transfer parameters: - length - actual_length - status (see libusb1's API documentation for their signification) Returned list is consistent with getISOBufferList return value. Should not be called on a submitted transfer (except for 'length' values). """ transfer_p = self.__transfer transfer = transfer_p.contents # pylint: disable=undefined-variable if transfer.type != TRANSFER_TYPE_ISOCHRONOUS: # pylint: enable=undefined-variable raise TypeError( 'This method cannot be called on non-iso transfers.' ) return [ { 'length': x.length, 'actual_length': x.actual_length, 'status': x.status, } for x in libusb1.get_iso_packet_list(transfer_p) ]
def sizes(self): """Provides block_size and transfer_size through a list of dict, for instance: [{'transfer': '1M 4M', 'block': '8M'}] """ if self.attributes.get('sizes'): for settings in self.attributes.get('sizes'): for pair in itertools.product( shlex.split(settings['block']), shlex.split(settings['transfer']) ): yield pair else: for pair in itertools.product(self.block_size, self.transfer_size): yield pair
Provides block_size and transfer_size through a list of dict, for instance: [{'transfer': '1M 4M', 'block': '8M'}]
Below is the the instruction that describes the task: ### Input: Provides block_size and transfer_size through a list of dict, for instance: [{'transfer': '1M 4M', 'block': '8M'}] ### Response: def sizes(self): """Provides block_size and transfer_size through a list of dict, for instance: [{'transfer': '1M 4M', 'block': '8M'}] """ if self.attributes.get('sizes'): for settings in self.attributes.get('sizes'): for pair in itertools.product( shlex.split(settings['block']), shlex.split(settings['transfer']) ): yield pair else: for pair in itertools.product(self.block_size, self.transfer_size): yield pair
def get_kapur_threshold(image, mask=None): """The Kapur, Sahoo, & Wong method of thresholding, adapted to log-space.""" cropped_image = np.array(image.flat) if mask is None else image[mask] if np.product(cropped_image.shape)<3: return 0 if np.min(cropped_image) == np.max(cropped_image): return cropped_image[0] log_image = np.log2(smooth_with_noise(cropped_image, 8)) min_log_image = np.min(log_image) max_log_image = np.max(log_image) histogram = scipy.ndimage.histogram(log_image, min_log_image, max_log_image, 256) histogram_values = (min_log_image + (max_log_image - min_log_image)* np.arange(256, dtype=float) / 255) # drop any zero bins keep = histogram != 0 histogram = histogram[keep] histogram_values = histogram_values[keep] # check for corner cases if np.product(histogram_values)==1: return 2**histogram_values[0] # Normalize to probabilities p = histogram.astype(float) / float(np.sum(histogram)) # Find the probabilities totals up to and above each possible threshold. lo_sum = np.cumsum(p); hi_sum = lo_sum[-1] - lo_sum; lo_e = np.cumsum(p * np.log2(p)); hi_e = lo_e[-1] - lo_e; # compute the entropies lo_entropy = lo_e / lo_sum - np.log2(lo_sum); hi_entropy = hi_e / hi_sum - np.log2(hi_sum); sum_entropy = lo_entropy[:-1] + hi_entropy[:-1]; sum_entropy[np.logical_not(np.isfinite(sum_entropy))] = np.Inf entry = np.argmin(sum_entropy); return 2**((histogram_values[entry] + histogram_values[entry+1]) / 2);
The Kapur, Sahoo, & Wong method of thresholding, adapted to log-space.
Below is the the instruction that describes the task: ### Input: The Kapur, Sahoo, & Wong method of thresholding, adapted to log-space. ### Response: def get_kapur_threshold(image, mask=None): """The Kapur, Sahoo, & Wong method of thresholding, adapted to log-space.""" cropped_image = np.array(image.flat) if mask is None else image[mask] if np.product(cropped_image.shape)<3: return 0 if np.min(cropped_image) == np.max(cropped_image): return cropped_image[0] log_image = np.log2(smooth_with_noise(cropped_image, 8)) min_log_image = np.min(log_image) max_log_image = np.max(log_image) histogram = scipy.ndimage.histogram(log_image, min_log_image, max_log_image, 256) histogram_values = (min_log_image + (max_log_image - min_log_image)* np.arange(256, dtype=float) / 255) # drop any zero bins keep = histogram != 0 histogram = histogram[keep] histogram_values = histogram_values[keep] # check for corner cases if np.product(histogram_values)==1: return 2**histogram_values[0] # Normalize to probabilities p = histogram.astype(float) / float(np.sum(histogram)) # Find the probabilities totals up to and above each possible threshold. lo_sum = np.cumsum(p); hi_sum = lo_sum[-1] - lo_sum; lo_e = np.cumsum(p * np.log2(p)); hi_e = lo_e[-1] - lo_e; # compute the entropies lo_entropy = lo_e / lo_sum - np.log2(lo_sum); hi_entropy = hi_e / hi_sum - np.log2(hi_sum); sum_entropy = lo_entropy[:-1] + hi_entropy[:-1]; sum_entropy[np.logical_not(np.isfinite(sum_entropy))] = np.Inf entry = np.argmin(sum_entropy); return 2**((histogram_values[entry] + histogram_values[entry+1]) / 2);
def _represent_arguments(*arguments, **keyword_arguments): """Represent the aruments in a form suitable as a key (hashable) And which will be recognisable to user in error messages >>> print(_represent_arguments([1, 2], **{'fred':'here'})) [1, 2], fred='here' """ argument_strings = [repr(a) for a in arguments] keyword_strings = [ '='.join((k, repr(v))) for k, v in keyword_arguments.items()] return ', '.join(argument_strings + keyword_strings)
Represent the aruments in a form suitable as a key (hashable) And which will be recognisable to user in error messages >>> print(_represent_arguments([1, 2], **{'fred':'here'})) [1, 2], fred='here'
Below is the the instruction that describes the task: ### Input: Represent the aruments in a form suitable as a key (hashable) And which will be recognisable to user in error messages >>> print(_represent_arguments([1, 2], **{'fred':'here'})) [1, 2], fred='here' ### Response: def _represent_arguments(*arguments, **keyword_arguments): """Represent the aruments in a form suitable as a key (hashable) And which will be recognisable to user in error messages >>> print(_represent_arguments([1, 2], **{'fred':'here'})) [1, 2], fred='here' """ argument_strings = [repr(a) for a in arguments] keyword_strings = [ '='.join((k, repr(v))) for k, v in keyword_arguments.items()] return ', '.join(argument_strings + keyword_strings)
def decode_value(value, client): """Converts a Firestore protobuf ``Value`` to a native Python value. Args: value (google.cloud.firestore_v1beta1.types.Value): A Firestore protobuf to be decoded / parsed / converted. client (~.firestore_v1beta1.client.Client): A client that has a document factory. Returns: Union[NoneType, bool, int, float, datetime.datetime, \ str, bytes, dict, ~google.cloud.Firestore.GeoPoint]: A native Python value converted from the ``value``. Raises: NotImplementedError: If the ``value_type`` is ``reference_value``. ValueError: If the ``value_type`` is unknown. """ value_type = value.WhichOneof("value_type") if value_type == "null_value": return None elif value_type == "boolean_value": return value.boolean_value elif value_type == "integer_value": return value.integer_value elif value_type == "double_value": return value.double_value elif value_type == "timestamp_value": return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value) elif value_type == "string_value": return value.string_value elif value_type == "bytes_value": return value.bytes_value elif value_type == "reference_value": return reference_value_to_document(value.reference_value, client) elif value_type == "geo_point_value": return GeoPoint(value.geo_point_value.latitude, value.geo_point_value.longitude) elif value_type == "array_value": return [decode_value(element, client) for element in value.array_value.values] elif value_type == "map_value": return decode_dict(value.map_value.fields, client) else: raise ValueError("Unknown ``value_type``", value_type)
Converts a Firestore protobuf ``Value`` to a native Python value. Args: value (google.cloud.firestore_v1beta1.types.Value): A Firestore protobuf to be decoded / parsed / converted. client (~.firestore_v1beta1.client.Client): A client that has a document factory. Returns: Union[NoneType, bool, int, float, datetime.datetime, \ str, bytes, dict, ~google.cloud.Firestore.GeoPoint]: A native Python value converted from the ``value``. Raises: NotImplementedError: If the ``value_type`` is ``reference_value``. ValueError: If the ``value_type`` is unknown.
Below is the the instruction that describes the task: ### Input: Converts a Firestore protobuf ``Value`` to a native Python value. Args: value (google.cloud.firestore_v1beta1.types.Value): A Firestore protobuf to be decoded / parsed / converted. client (~.firestore_v1beta1.client.Client): A client that has a document factory. Returns: Union[NoneType, bool, int, float, datetime.datetime, \ str, bytes, dict, ~google.cloud.Firestore.GeoPoint]: A native Python value converted from the ``value``. Raises: NotImplementedError: If the ``value_type`` is ``reference_value``. ValueError: If the ``value_type`` is unknown. ### Response: def decode_value(value, client): """Converts a Firestore protobuf ``Value`` to a native Python value. Args: value (google.cloud.firestore_v1beta1.types.Value): A Firestore protobuf to be decoded / parsed / converted. client (~.firestore_v1beta1.client.Client): A client that has a document factory. Returns: Union[NoneType, bool, int, float, datetime.datetime, \ str, bytes, dict, ~google.cloud.Firestore.GeoPoint]: A native Python value converted from the ``value``. Raises: NotImplementedError: If the ``value_type`` is ``reference_value``. ValueError: If the ``value_type`` is unknown. 
""" value_type = value.WhichOneof("value_type") if value_type == "null_value": return None elif value_type == "boolean_value": return value.boolean_value elif value_type == "integer_value": return value.integer_value elif value_type == "double_value": return value.double_value elif value_type == "timestamp_value": return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value) elif value_type == "string_value": return value.string_value elif value_type == "bytes_value": return value.bytes_value elif value_type == "reference_value": return reference_value_to_document(value.reference_value, client) elif value_type == "geo_point_value": return GeoPoint(value.geo_point_value.latitude, value.geo_point_value.longitude) elif value_type == "array_value": return [decode_value(element, client) for element in value.array_value.values] elif value_type == "map_value": return decode_dict(value.map_value.fields, client) else: raise ValueError("Unknown ``value_type``", value_type)
def handle(self, sock, read_data, path, headers): "Sends back a static error page." for i in range(self.attempts): try: server_sock = eventlet.connect( tuple(random.choice(self.backends)), ) except socket.error: eventlet.sleep(self.delay) continue # Function to help track data usage def send_onwards(data): server_sock.sendall(data) return len(data) try: size = send_onwards(read_data) size += SocketMelder(sock, server_sock).run() except socket.error, e: if e.errno != errno.EPIPE: raise
Sends back a static error page.
Below is the the instruction that describes the task: ### Input: Sends back a static error page. ### Response: def handle(self, sock, read_data, path, headers): "Sends back a static error page." for i in range(self.attempts): try: server_sock = eventlet.connect( tuple(random.choice(self.backends)), ) except socket.error: eventlet.sleep(self.delay) continue # Function to help track data usage def send_onwards(data): server_sock.sendall(data) return len(data) try: size = send_onwards(read_data) size += SocketMelder(sock, server_sock).run() except socket.error, e: if e.errno != errno.EPIPE: raise
def __embed_branch(dfs_data): """Builds the combinatorial embedding of the graph. Returns whether the graph is planar.""" u = dfs_data['ordering'][0] dfs_data['LF'] = [] dfs_data['RF'] = [] dfs_data['FG'] = {} n = dfs_data['graph'].num_nodes() f0 = (0, n) g0 = (0, n) L0 = {'u': 0, 'v': n} R0 = {'x': 0, 'y': n} dfs_data['LF'].append(f0) dfs_data['RF'].append(g0) dfs_data['FG'][0] = [L0, R0] dfs_data['FG']['m'] = 0 dfs_data['FG']['l'] = 0 dfs_data['FG']['r'] = 0 #print 'DFS Ordering: {}'.format(dfs_data['ordering']) #for node in dfs_data['ordering']: #print '{}: {}'.format(node, dfs_data['adj'][node]) nonplanar = __embed_branch_recursive(u, dfs_data) #print "Nonplanar:", nonplanar return not nonplanar
Builds the combinatorial embedding of the graph. Returns whether the graph is planar.
Below is the the instruction that describes the task: ### Input: Builds the combinatorial embedding of the graph. Returns whether the graph is planar. ### Response: def __embed_branch(dfs_data): """Builds the combinatorial embedding of the graph. Returns whether the graph is planar.""" u = dfs_data['ordering'][0] dfs_data['LF'] = [] dfs_data['RF'] = [] dfs_data['FG'] = {} n = dfs_data['graph'].num_nodes() f0 = (0, n) g0 = (0, n) L0 = {'u': 0, 'v': n} R0 = {'x': 0, 'y': n} dfs_data['LF'].append(f0) dfs_data['RF'].append(g0) dfs_data['FG'][0] = [L0, R0] dfs_data['FG']['m'] = 0 dfs_data['FG']['l'] = 0 dfs_data['FG']['r'] = 0 #print 'DFS Ordering: {}'.format(dfs_data['ordering']) #for node in dfs_data['ordering']: #print '{}: {}'.format(node, dfs_data['adj'][node]) nonplanar = __embed_branch_recursive(u, dfs_data) #print "Nonplanar:", nonplanar return not nonplanar
def import_generators(self, session, debug=False): """ Imports renewable (res) and conventional (conv) generators Args: session : sqlalchemy.orm.session.Session Database session debug: If True, information is printed during process Notes: Connection of generators is done later on in NetworkDing0's method connect_generators() """ def import_res_generators(): """Imports renewable (res) generators""" # build query generators_sqla = session.query( self.orm['orm_re_generators'].columns.id, self.orm['orm_re_generators'].columns.subst_id, self.orm['orm_re_generators'].columns.la_id, self.orm['orm_re_generators'].columns.mvlv_subst_id, self.orm['orm_re_generators'].columns.electrical_capacity, self.orm['orm_re_generators'].columns.generation_type, self.orm['orm_re_generators'].columns.generation_subtype, self.orm['orm_re_generators'].columns.voltage_level, self.orm['orm_re_generators'].columns.w_id, func.ST_AsText(func.ST_Transform( self.orm['orm_re_generators'].columns.rea_geom_new, srid)).label('geom_new'), func.ST_AsText(func.ST_Transform( self.orm['orm_re_generators'].columns.geom, srid)).label('geom') ). \ filter( self.orm['orm_re_generators'].columns.subst_id.in_(list(mv_grid_districts_dict))). \ filter(self.orm['orm_re_generators'].columns.voltage_level.in_([4, 5, 6, 7])). 
\ filter(self.orm['version_condition_re']) # read data from db generators = pd.read_sql_query(generators_sqla.statement, session.bind, index_col='id') # define generators with unknown subtype as 'unknown' generators.loc[generators[ 'generation_subtype'].isnull(), 'generation_subtype'] = 'unknown' for id_db, row in generators.iterrows(): # treat generators' geom: # use geom_new (relocated genos from data processing) # otherwise use original geom from EnergyMap if row['geom_new']: geo_data = wkt_loads(row['geom_new']) elif not row['geom_new']: geo_data = wkt_loads(row['geom']) logger.warning( 'Generator {} has no geom_new entry,' 'EnergyMap\'s geom entry will be used.'.format( id_db)) # if no geom is available at all, skip generator elif not row['geom']: #geo_data = logger.error('Generator {} has no geom entry either' 'and will be skipped.'.format(id_db)) continue # look up MV grid mv_grid_district_id = row['subst_id'] mv_grid = mv_grid_districts_dict[mv_grid_district_id].mv_grid # create generator object if row['generation_type'] in ['solar', 'wind']: generator = GeneratorFluctuatingDing0( id_db=id_db, mv_grid=mv_grid, capacity=row['electrical_capacity'], type=row['generation_type'], subtype=row['generation_subtype'], v_level=int(row['voltage_level']), weather_cell_id=row['w_id']) else: generator = GeneratorDing0( id_db=id_db, mv_grid=mv_grid, capacity=row['electrical_capacity'], type=row['generation_type'], subtype=row['generation_subtype'], v_level=int(row['voltage_level'])) # MV generators if generator.v_level in [4, 5]: generator.geo_data = geo_data mv_grid.add_generator(generator) # LV generators elif generator.v_level in [6, 7]: # look up MV-LV substation id mvlv_subst_id = row['mvlv_subst_id'] # if there's a LVGD id if mvlv_subst_id and not isnan(mvlv_subst_id): # assume that given LA exists try: # get LVGD lv_station = lv_stations_dict[mvlv_subst_id] lv_grid_district = lv_station.grid.grid_district generator.lv_grid = lv_station.grid # set geom (use original 
from db) generator.geo_data = geo_data # if LA/LVGD does not exist, choose random LVGD and move generator to station of LVGD # this occurs due to exclusion of LA with peak load < 1kW except: lv_grid_district = random.choice(list(lv_grid_districts_dict.values())) generator.lv_grid = lv_grid_district.lv_grid generator.geo_data = lv_grid_district.lv_grid.station().geo_data logger.warning('Generator {} cannot be assigned to ' 'non-existent LV Grid District and was ' 'allocated to a random LV Grid District ({}).'.format( repr(generator), repr(lv_grid_district))) pass else: lv_grid_district = random.choice(list(lv_grid_districts_dict.values())) generator.lv_grid = lv_grid_district.lv_grid generator.geo_data = lv_grid_district.lv_grid.station().geo_data logger.warning('Generator {} has no la_id and was ' 'assigned to a random LV Grid District ({}).'.format( repr(generator), repr(lv_grid_district))) generator.lv_load_area = lv_grid_district.lv_load_area lv_grid_district.lv_grid.add_generator(generator) def import_conv_generators(): """Imports conventional (conv) generators""" # build query generators_sqla = session.query( self.orm['orm_conv_generators'].columns.id, self.orm['orm_conv_generators'].columns.subst_id, self.orm['orm_conv_generators'].columns.name, self.orm['orm_conv_generators'].columns.capacity, self.orm['orm_conv_generators'].columns.fuel, self.orm['orm_conv_generators'].columns.voltage_level, func.ST_AsText(func.ST_Transform( self.orm['orm_conv_generators'].columns.geom, srid)).label('geom')). \ filter( self.orm['orm_conv_generators'].columns.subst_id.in_(list(mv_grid_districts_dict))). \ filter(self.orm['orm_conv_generators'].columns.voltage_level.in_([4, 5, 6])). 
\ filter(self.orm['version_condition_conv']) # read data from db generators = pd.read_sql_query(generators_sqla.statement, session.bind, index_col='id') for id_db, row in generators.iterrows(): # look up MV grid mv_grid_district_id = row['subst_id'] mv_grid = mv_grid_districts_dict[mv_grid_district_id].mv_grid # create generator object generator = GeneratorDing0(id_db=id_db, name=row['name'], geo_data=wkt_loads(row['geom']), mv_grid=mv_grid, capacity=row['capacity'], type=row['fuel'], subtype='unknown', v_level=int(row['voltage_level'])) # add generators to graph if generator.v_level in [4, 5]: mv_grid.add_generator(generator) # there's only one conv. geno with v_level=6 -> connect to MV grid elif generator.v_level in [6]: generator.v_level = 5 mv_grid.add_generator(generator) # get ding0s' standard CRS (SRID) srid = str(int(cfg_ding0.get('geo', 'srid'))) # get predefined random seed and initialize random generator seed = int(cfg_ding0.get('random', 'seed')) random.seed(a=seed) # build dicts to map MV grid district and Load Area ids to related objects mv_grid_districts_dict,\ lv_load_areas_dict,\ lv_grid_districts_dict,\ lv_stations_dict = self.get_mvgd_lvla_lvgd_obj_from_id() # import renewable generators import_res_generators() # import conventional generators import_conv_generators() logger.info('=====> Generators imported')
Imports renewable (res) and conventional (conv) generators Args: session : sqlalchemy.orm.session.Session Database session debug: If True, information is printed during process Notes: Connection of generators is done later on in NetworkDing0's method connect_generators()
Below is the the instruction that describes the task: ### Input: Imports renewable (res) and conventional (conv) generators Args: session : sqlalchemy.orm.session.Session Database session debug: If True, information is printed during process Notes: Connection of generators is done later on in NetworkDing0's method connect_generators() ### Response: def import_generators(self, session, debug=False): """ Imports renewable (res) and conventional (conv) generators Args: session : sqlalchemy.orm.session.Session Database session debug: If True, information is printed during process Notes: Connection of generators is done later on in NetworkDing0's method connect_generators() """ def import_res_generators(): """Imports renewable (res) generators""" # build query generators_sqla = session.query( self.orm['orm_re_generators'].columns.id, self.orm['orm_re_generators'].columns.subst_id, self.orm['orm_re_generators'].columns.la_id, self.orm['orm_re_generators'].columns.mvlv_subst_id, self.orm['orm_re_generators'].columns.electrical_capacity, self.orm['orm_re_generators'].columns.generation_type, self.orm['orm_re_generators'].columns.generation_subtype, self.orm['orm_re_generators'].columns.voltage_level, self.orm['orm_re_generators'].columns.w_id, func.ST_AsText(func.ST_Transform( self.orm['orm_re_generators'].columns.rea_geom_new, srid)).label('geom_new'), func.ST_AsText(func.ST_Transform( self.orm['orm_re_generators'].columns.geom, srid)).label('geom') ). \ filter( self.orm['orm_re_generators'].columns.subst_id.in_(list(mv_grid_districts_dict))). \ filter(self.orm['orm_re_generators'].columns.voltage_level.in_([4, 5, 6, 7])). 
\ filter(self.orm['version_condition_re']) # read data from db generators = pd.read_sql_query(generators_sqla.statement, session.bind, index_col='id') # define generators with unknown subtype as 'unknown' generators.loc[generators[ 'generation_subtype'].isnull(), 'generation_subtype'] = 'unknown' for id_db, row in generators.iterrows(): # treat generators' geom: # use geom_new (relocated genos from data processing) # otherwise use original geom from EnergyMap if row['geom_new']: geo_data = wkt_loads(row['geom_new']) elif not row['geom_new']: geo_data = wkt_loads(row['geom']) logger.warning( 'Generator {} has no geom_new entry,' 'EnergyMap\'s geom entry will be used.'.format( id_db)) # if no geom is available at all, skip generator elif not row['geom']: #geo_data = logger.error('Generator {} has no geom entry either' 'and will be skipped.'.format(id_db)) continue # look up MV grid mv_grid_district_id = row['subst_id'] mv_grid = mv_grid_districts_dict[mv_grid_district_id].mv_grid # create generator object if row['generation_type'] in ['solar', 'wind']: generator = GeneratorFluctuatingDing0( id_db=id_db, mv_grid=mv_grid, capacity=row['electrical_capacity'], type=row['generation_type'], subtype=row['generation_subtype'], v_level=int(row['voltage_level']), weather_cell_id=row['w_id']) else: generator = GeneratorDing0( id_db=id_db, mv_grid=mv_grid, capacity=row['electrical_capacity'], type=row['generation_type'], subtype=row['generation_subtype'], v_level=int(row['voltage_level'])) # MV generators if generator.v_level in [4, 5]: generator.geo_data = geo_data mv_grid.add_generator(generator) # LV generators elif generator.v_level in [6, 7]: # look up MV-LV substation id mvlv_subst_id = row['mvlv_subst_id'] # if there's a LVGD id if mvlv_subst_id and not isnan(mvlv_subst_id): # assume that given LA exists try: # get LVGD lv_station = lv_stations_dict[mvlv_subst_id] lv_grid_district = lv_station.grid.grid_district generator.lv_grid = lv_station.grid # set geom (use original 
from db) generator.geo_data = geo_data # if LA/LVGD does not exist, choose random LVGD and move generator to station of LVGD # this occurs due to exclusion of LA with peak load < 1kW except: lv_grid_district = random.choice(list(lv_grid_districts_dict.values())) generator.lv_grid = lv_grid_district.lv_grid generator.geo_data = lv_grid_district.lv_grid.station().geo_data logger.warning('Generator {} cannot be assigned to ' 'non-existent LV Grid District and was ' 'allocated to a random LV Grid District ({}).'.format( repr(generator), repr(lv_grid_district))) pass else: lv_grid_district = random.choice(list(lv_grid_districts_dict.values())) generator.lv_grid = lv_grid_district.lv_grid generator.geo_data = lv_grid_district.lv_grid.station().geo_data logger.warning('Generator {} has no la_id and was ' 'assigned to a random LV Grid District ({}).'.format( repr(generator), repr(lv_grid_district))) generator.lv_load_area = lv_grid_district.lv_load_area lv_grid_district.lv_grid.add_generator(generator) def import_conv_generators(): """Imports conventional (conv) generators""" # build query generators_sqla = session.query( self.orm['orm_conv_generators'].columns.id, self.orm['orm_conv_generators'].columns.subst_id, self.orm['orm_conv_generators'].columns.name, self.orm['orm_conv_generators'].columns.capacity, self.orm['orm_conv_generators'].columns.fuel, self.orm['orm_conv_generators'].columns.voltage_level, func.ST_AsText(func.ST_Transform( self.orm['orm_conv_generators'].columns.geom, srid)).label('geom')). \ filter( self.orm['orm_conv_generators'].columns.subst_id.in_(list(mv_grid_districts_dict))). \ filter(self.orm['orm_conv_generators'].columns.voltage_level.in_([4, 5, 6])). 
\ filter(self.orm['version_condition_conv']) # read data from db generators = pd.read_sql_query(generators_sqla.statement, session.bind, index_col='id') for id_db, row in generators.iterrows(): # look up MV grid mv_grid_district_id = row['subst_id'] mv_grid = mv_grid_districts_dict[mv_grid_district_id].mv_grid # create generator object generator = GeneratorDing0(id_db=id_db, name=row['name'], geo_data=wkt_loads(row['geom']), mv_grid=mv_grid, capacity=row['capacity'], type=row['fuel'], subtype='unknown', v_level=int(row['voltage_level'])) # add generators to graph if generator.v_level in [4, 5]: mv_grid.add_generator(generator) # there's only one conv. geno with v_level=6 -> connect to MV grid elif generator.v_level in [6]: generator.v_level = 5 mv_grid.add_generator(generator) # get ding0s' standard CRS (SRID) srid = str(int(cfg_ding0.get('geo', 'srid'))) # get predefined random seed and initialize random generator seed = int(cfg_ding0.get('random', 'seed')) random.seed(a=seed) # build dicts to map MV grid district and Load Area ids to related objects mv_grid_districts_dict,\ lv_load_areas_dict,\ lv_grid_districts_dict,\ lv_stations_dict = self.get_mvgd_lvla_lvgd_obj_from_id() # import renewable generators import_res_generators() # import conventional generators import_conv_generators() logger.info('=====> Generators imported')
def _private_packages_allowed(): """ Checks if the current user is allowed to create private packages. In the public cloud, the user needs to be on a paid plan. There are no restrictions in other deployments. """ if not HAVE_PAYMENTS or TEAM_ID: return True customer = _get_or_create_customer() plan = _get_customer_plan(customer) return plan != PaymentPlan.FREE
Checks if the current user is allowed to create private packages. In the public cloud, the user needs to be on a paid plan. There are no restrictions in other deployments.
Below is the the instruction that describes the task: ### Input: Checks if the current user is allowed to create private packages. In the public cloud, the user needs to be on a paid plan. There are no restrictions in other deployments. ### Response: def _private_packages_allowed(): """ Checks if the current user is allowed to create private packages. In the public cloud, the user needs to be on a paid plan. There are no restrictions in other deployments. """ if not HAVE_PAYMENTS or TEAM_ID: return True customer = _get_or_create_customer() plan = _get_customer_plan(customer) return plan != PaymentPlan.FREE
def _lte(field, value, document): """ Returns True if the value of a document field is less than or equal to a given value """ try: return document.get(field, None) <= value except TypeError: # pragma: no cover Python < 3.0 return False
Returns True if the value of a document field is less than or equal to a given value
Below is the the instruction that describes the task: ### Input: Returns True if the value of a document field is less than or equal to a given value ### Response: def _lte(field, value, document): """ Returns True if the value of a document field is less than or equal to a given value """ try: return document.get(field, None) <= value except TypeError: # pragma: no cover Python < 3.0 return False
def color( self, name, colorGroup = None ): """ Returns the color for the given name at the inputed group. If no \ group is specified, the first group in the list is used. :param name | <str> colorGroup | <str> || None :return <QColor> """ if ( not colorGroup and self._colorGroups ): colorGroup = self._colorGroups[0] if ( not colorGroup ): return QColor() return self._colors.get(str(name), {}).get(str(colorGroup), QColor())
Returns the color for the given name at the inputed group. If no \ group is specified, the first group in the list is used. :param name | <str> colorGroup | <str> || None :return <QColor>
Below is the the instruction that describes the task: ### Input: Returns the color for the given name at the inputed group. If no \ group is specified, the first group in the list is used. :param name | <str> colorGroup | <str> || None :return <QColor> ### Response: def color( self, name, colorGroup = None ): """ Returns the color for the given name at the inputed group. If no \ group is specified, the first group in the list is used. :param name | <str> colorGroup | <str> || None :return <QColor> """ if ( not colorGroup and self._colorGroups ): colorGroup = self._colorGroups[0] if ( not colorGroup ): return QColor() return self._colors.get(str(name), {}).get(str(colorGroup), QColor())
def __get_stock_row(self, stock: Stock, depth: int) -> str: """ formats stock row """ assert isinstance(stock, Stock) view_model = AssetAllocationViewModel() view_model.depth = depth # Symbol view_model.name = stock.symbol # Current allocation view_model.curr_allocation = stock.curr_alloc # Value in base currency view_model.curr_value = stock.value_in_base_currency # Value in security's currency. view_model.curr_value_own_currency = stock.value view_model.own_currency = stock.currency return view_model
formats stock row
Below is the the instruction that describes the task: ### Input: formats stock row ### Response: def __get_stock_row(self, stock: Stock, depth: int) -> str: """ formats stock row """ assert isinstance(stock, Stock) view_model = AssetAllocationViewModel() view_model.depth = depth # Symbol view_model.name = stock.symbol # Current allocation view_model.curr_allocation = stock.curr_alloc # Value in base currency view_model.curr_value = stock.value_in_base_currency # Value in security's currency. view_model.curr_value_own_currency = stock.value view_model.own_currency = stock.currency return view_model
def alchemyencoder(obj): """JSON encoder function for SQLAlchemy special classes.""" if isinstance(obj, datetime.date): return obj.isoformat() elif isinstance(obj, decimal.Decimal): return float(obj)
JSON encoder function for SQLAlchemy special classes.
Below is the the instruction that describes the task: ### Input: JSON encoder function for SQLAlchemy special classes. ### Response: def alchemyencoder(obj): """JSON encoder function for SQLAlchemy special classes.""" if isinstance(obj, datetime.date): return obj.isoformat() elif isinstance(obj, decimal.Decimal): return float(obj)
def require_option(current_ctx: click.Context, param_name: str) -> None: """Throw an exception if an option wasn't required. This is useful when its optional in some contexts but required for a subcommand""" ctx = current_ctx param_definition = None while ctx is not None: # ctx.command.params has the actual definition of the param. We use # this when raising the exception. param_definition = next( (p for p in ctx.command.params if p.name == param_name), None ) # ctx.params has the current value of the parameter, as set by the user. if ctx.params.get(param_name): return ctx = ctx.parent assert param_definition, f"unknown parameter {param_name}" raise click.MissingParameter(ctx=current_ctx, param=param_definition)
Throw an exception if an option wasn't required. This is useful when its optional in some contexts but required for a subcommand
Below is the the instruction that describes the task: ### Input: Throw an exception if an option wasn't required. This is useful when its optional in some contexts but required for a subcommand ### Response: def require_option(current_ctx: click.Context, param_name: str) -> None: """Throw an exception if an option wasn't required. This is useful when its optional in some contexts but required for a subcommand""" ctx = current_ctx param_definition = None while ctx is not None: # ctx.command.params has the actual definition of the param. We use # this when raising the exception. param_definition = next( (p for p in ctx.command.params if p.name == param_name), None ) # ctx.params has the current value of the parameter, as set by the user. if ctx.params.get(param_name): return ctx = ctx.parent assert param_definition, f"unknown parameter {param_name}" raise click.MissingParameter(ctx=current_ctx, param=param_definition)
def DumpMany(objs): """Stringifies a sequence of Python objects to a multi-document YAML. Args: objs: An iterable of Python objects to convert to YAML. Returns: A multi-document YAML representation of the given objects. """ precondition.AssertIterableType(objs, object) text = yaml.safe_dump_all(objs, default_flow_style=False, allow_unicode=True) if compatibility.PY2: text = text.decode("utf-8") return text
Stringifies a sequence of Python objects to a multi-document YAML. Args: objs: An iterable of Python objects to convert to YAML. Returns: A multi-document YAML representation of the given objects.
Below is the the instruction that describes the task: ### Input: Stringifies a sequence of Python objects to a multi-document YAML. Args: objs: An iterable of Python objects to convert to YAML. Returns: A multi-document YAML representation of the given objects. ### Response: def DumpMany(objs): """Stringifies a sequence of Python objects to a multi-document YAML. Args: objs: An iterable of Python objects to convert to YAML. Returns: A multi-document YAML representation of the given objects. """ precondition.AssertIterableType(objs, object) text = yaml.safe_dump_all(objs, default_flow_style=False, allow_unicode=True) if compatibility.PY2: text = text.decode("utf-8") return text
def execute(self, sql_statement, parameters=None): """ Execute the specified Structured Query Language (SQL) parameterized statement. @note: each result column MUST be named with distinct names. @param sql_statement: a string representation of a Structured Query Language (SQL) expression including Python extended format codes, also known as "pyformat", and extended pyformat code. The following forms are accepted: * ``%(name)s``: indicate a simple value as for instance in:: INSERT INTO foo(bar) VALUES (%(value)s) with ``parameters``:: { 'value': 'something' } * ``%[name]s``: indicate a list of simple values such as, for instance, in:: INSERT INTO foo(bar) VALUES %[values]s with ``parameters``:: { 'values': [ i for i in range(8) ] } * ``%[name]s``: indicate a list of tuples as for instance in:: INSERT INTO foo(a, b, c) VALUES %[values]s with ``parameters``:: { 'values': [ [ 0, 'a', '!' ], [ 1, 'b', '@' ] [ 2, 'c', '#' ] ] } * ``%[name]s``: indicate a list of tuples of ``(boolean, value)`` where: * ``boolean``: ``True`` if the value MUST be used as it, ``False`` if the value needs to be quoted. * ``value``: the value itself. as for instance in:: INSERT INTO foo(id, coordinates) VALUES %[values]s with ``parameters``:: { 'values': [ [ uuid.uuid1(), (False, 'ST_SetSRID(ST_MakePoint(160.1, 10.6), 4326)') ], [ uuid.uuid1(), (False, 'ST_SetSRID(ST_MakePoint(160.1, 10.6), 4326)') ], [ uuid.uuid1(), (False, 'ST_SetSRID(ST_MakePoint(160.1, 10.6), 4326)') ] ] } @param parameters: a dictionary of parameters. @return: a cursor object representing a database cursor, which is used to manage the context of a fetch operation. """ if parameters: # Convert the simple value of parameters which the database adapter # cannot adapt to SQL type, such as enum values. # [http://initd.org/psycopg/docs/usage.html#query-parameters] # # @note: tuple is a special type used to indicate not to quote the # underlying value which is a special SQL expression, such as a # call of a stored procedure. 
for (name, value) in parameters.iteritems(): if not isinstance(value, (types.NoneType, bool, int, long, float, basestring, tuple, list, set)): parameters[name] = obj.stringify(value) # Replace the placeholders in the SQL statement for which the database # adapter cannot adapt the Python value to SQL types, for instance, # list and nested list. sql_statement = RdbmsConnection._prepare_statement(sql_statement, parameters) # Compact the SQL statement expression removing useless space and # newline characters, and stripping all SQL comments. sql_statement = ' '.join( [ line for line in [ line.strip() for line in REGEX_PATTERN_SQL_COMMENT.sub('\n', sql_statement.strip()).splitlines() ] if len(line) > 0 ]) self.logger.debug('[DEBUG] Executing SQL statement:\n%s\n\twith: %s' % (sql_statement, parameters)) if self.__cursor is None: self.__cursor = self.__connection.cursor() execution_start_time = datetime.datetime.now() self.__cursor.execute(sql_statement, parameters) execution_end_time = datetime.datetime.now() execution_duration = execution_end_time - execution_start_time self.logger.debug('Time: %d ms' % ((execution_duration.seconds * 1000) + (execution_duration.microseconds / 1000))) return RdbmsConnection.RdbmsCursor(self.__cursor)
Execute the specified Structured Query Language (SQL) parameterized statement. @note: each result column MUST be named with distinct names. @param sql_statement: a string representation of a Structured Query Language (SQL) expression including Python extended format codes, also known as "pyformat", and extended pyformat code. The following forms are accepted: * ``%(name)s``: indicate a simple value as for instance in:: INSERT INTO foo(bar) VALUES (%(value)s) with ``parameters``:: { 'value': 'something' } * ``%[name]s``: indicate a list of simple values such as, for instance, in:: INSERT INTO foo(bar) VALUES %[values]s with ``parameters``:: { 'values': [ i for i in range(8) ] } * ``%[name]s``: indicate a list of tuples as for instance in:: INSERT INTO foo(a, b, c) VALUES %[values]s with ``parameters``:: { 'values': [ [ 0, 'a', '!' ], [ 1, 'b', '@' ] [ 2, 'c', '#' ] ] } * ``%[name]s``: indicate a list of tuples of ``(boolean, value)`` where: * ``boolean``: ``True`` if the value MUST be used as it, ``False`` if the value needs to be quoted. * ``value``: the value itself. as for instance in:: INSERT INTO foo(id, coordinates) VALUES %[values]s with ``parameters``:: { 'values': [ [ uuid.uuid1(), (False, 'ST_SetSRID(ST_MakePoint(160.1, 10.6), 4326)') ], [ uuid.uuid1(), (False, 'ST_SetSRID(ST_MakePoint(160.1, 10.6), 4326)') ], [ uuid.uuid1(), (False, 'ST_SetSRID(ST_MakePoint(160.1, 10.6), 4326)') ] ] } @param parameters: a dictionary of parameters. @return: a cursor object representing a database cursor, which is used to manage the context of a fetch operation.
Below is the the instruction that describes the task: ### Input: Execute the specified Structured Query Language (SQL) parameterized statement. @note: each result column MUST be named with distinct names. @param sql_statement: a string representation of a Structured Query Language (SQL) expression including Python extended format codes, also known as "pyformat", and extended pyformat code. The following forms are accepted: * ``%(name)s``: indicate a simple value as for instance in:: INSERT INTO foo(bar) VALUES (%(value)s) with ``parameters``:: { 'value': 'something' } * ``%[name]s``: indicate a list of simple values such as, for instance, in:: INSERT INTO foo(bar) VALUES %[values]s with ``parameters``:: { 'values': [ i for i in range(8) ] } * ``%[name]s``: indicate a list of tuples as for instance in:: INSERT INTO foo(a, b, c) VALUES %[values]s with ``parameters``:: { 'values': [ [ 0, 'a', '!' ], [ 1, 'b', '@' ] [ 2, 'c', '#' ] ] } * ``%[name]s``: indicate a list of tuples of ``(boolean, value)`` where: * ``boolean``: ``True`` if the value MUST be used as it, ``False`` if the value needs to be quoted. * ``value``: the value itself. as for instance in:: INSERT INTO foo(id, coordinates) VALUES %[values]s with ``parameters``:: { 'values': [ [ uuid.uuid1(), (False, 'ST_SetSRID(ST_MakePoint(160.1, 10.6), 4326)') ], [ uuid.uuid1(), (False, 'ST_SetSRID(ST_MakePoint(160.1, 10.6), 4326)') ], [ uuid.uuid1(), (False, 'ST_SetSRID(ST_MakePoint(160.1, 10.6), 4326)') ] ] } @param parameters: a dictionary of parameters. @return: a cursor object representing a database cursor, which is used to manage the context of a fetch operation. ### Response: def execute(self, sql_statement, parameters=None): """ Execute the specified Structured Query Language (SQL) parameterized statement. @note: each result column MUST be named with distinct names. 
@param sql_statement: a string representation of a Structured Query Language (SQL) expression including Python extended format codes, also known as "pyformat", and extended pyformat code. The following forms are accepted: * ``%(name)s``: indicate a simple value as for instance in:: INSERT INTO foo(bar) VALUES (%(value)s) with ``parameters``:: { 'value': 'something' } * ``%[name]s``: indicate a list of simple values such as, for instance, in:: INSERT INTO foo(bar) VALUES %[values]s with ``parameters``:: { 'values': [ i for i in range(8) ] } * ``%[name]s``: indicate a list of tuples as for instance in:: INSERT INTO foo(a, b, c) VALUES %[values]s with ``parameters``:: { 'values': [ [ 0, 'a', '!' ], [ 1, 'b', '@' ] [ 2, 'c', '#' ] ] } * ``%[name]s``: indicate a list of tuples of ``(boolean, value)`` where: * ``boolean``: ``True`` if the value MUST be used as it, ``False`` if the value needs to be quoted. * ``value``: the value itself. as for instance in:: INSERT INTO foo(id, coordinates) VALUES %[values]s with ``parameters``:: { 'values': [ [ uuid.uuid1(), (False, 'ST_SetSRID(ST_MakePoint(160.1, 10.6), 4326)') ], [ uuid.uuid1(), (False, 'ST_SetSRID(ST_MakePoint(160.1, 10.6), 4326)') ], [ uuid.uuid1(), (False, 'ST_SetSRID(ST_MakePoint(160.1, 10.6), 4326)') ] ] } @param parameters: a dictionary of parameters. @return: a cursor object representing a database cursor, which is used to manage the context of a fetch operation. """ if parameters: # Convert the simple value of parameters which the database adapter # cannot adapt to SQL type, such as enum values. # [http://initd.org/psycopg/docs/usage.html#query-parameters] # # @note: tuple is a special type used to indicate not to quote the # underlying value which is a special SQL expression, such as a # call of a stored procedure. 
for (name, value) in parameters.iteritems(): if not isinstance(value, (types.NoneType, bool, int, long, float, basestring, tuple, list, set)): parameters[name] = obj.stringify(value) # Replace the placeholders in the SQL statement for which the database # adapter cannot adapt the Python value to SQL types, for instance, # list and nested list. sql_statement = RdbmsConnection._prepare_statement(sql_statement, parameters) # Compact the SQL statement expression removing useless space and # newline characters, and stripping all SQL comments. sql_statement = ' '.join( [ line for line in [ line.strip() for line in REGEX_PATTERN_SQL_COMMENT.sub('\n', sql_statement.strip()).splitlines() ] if len(line) > 0 ]) self.logger.debug('[DEBUG] Executing SQL statement:\n%s\n\twith: %s' % (sql_statement, parameters)) if self.__cursor is None: self.__cursor = self.__connection.cursor() execution_start_time = datetime.datetime.now() self.__cursor.execute(sql_statement, parameters) execution_end_time = datetime.datetime.now() execution_duration = execution_end_time - execution_start_time self.logger.debug('Time: %d ms' % ((execution_duration.seconds * 1000) + (execution_duration.microseconds / 1000))) return RdbmsConnection.RdbmsCursor(self.__cursor)
def quantity_from_hdf5(dset): """ Return an Astropy Quantity object from a key in an HDF5 file, group, or dataset. This checks to see if the input file/group/dataset contains a ``'unit'`` attribute (e.g., in `f.attrs`). Parameters ---------- dset : :class:`h5py.DataSet` Returns ------- q : `astropy.units.Quantity`, `numpy.ndarray` If a unit attribute exists, this returns a Quantity. Otherwise, it returns a numpy array. """ if 'unit' in dset.attrs and dset.attrs['unit'] is not None: unit = u.Unit(dset.attrs['unit']) else: unit = 1. return dset[:] * unit
Return an Astropy Quantity object from a key in an HDF5 file, group, or dataset. This checks to see if the input file/group/dataset contains a ``'unit'`` attribute (e.g., in `f.attrs`). Parameters ---------- dset : :class:`h5py.DataSet` Returns ------- q : `astropy.units.Quantity`, `numpy.ndarray` If a unit attribute exists, this returns a Quantity. Otherwise, it returns a numpy array.
Below is the the instruction that describes the task: ### Input: Return an Astropy Quantity object from a key in an HDF5 file, group, or dataset. This checks to see if the input file/group/dataset contains a ``'unit'`` attribute (e.g., in `f.attrs`). Parameters ---------- dset : :class:`h5py.DataSet` Returns ------- q : `astropy.units.Quantity`, `numpy.ndarray` If a unit attribute exists, this returns a Quantity. Otherwise, it returns a numpy array. ### Response: def quantity_from_hdf5(dset): """ Return an Astropy Quantity object from a key in an HDF5 file, group, or dataset. This checks to see if the input file/group/dataset contains a ``'unit'`` attribute (e.g., in `f.attrs`). Parameters ---------- dset : :class:`h5py.DataSet` Returns ------- q : `astropy.units.Quantity`, `numpy.ndarray` If a unit attribute exists, this returns a Quantity. Otherwise, it returns a numpy array. """ if 'unit' in dset.attrs and dset.attrs['unit'] is not None: unit = u.Unit(dset.attrs['unit']) else: unit = 1. return dset[:] * unit
def desaturate(self, level): """Create a new instance based on this one but less saturated. Parameters: :level: The amount by which the color should be desaturated to produce the new one [0...1]. Returns: A grapefruit.Color instance. >>> Color.from_hsl(30, 0.5, 0.5).desaturate(0.25) Color(0.625, 0.5, 0.375, 1.0) >>> Color.from_hsl(30, 0.5, 0.5).desaturate(0.25).hsl (30.0, 0.25, 0.5) """ h, s, l = self.__hsl return Color((h, max(s - level, 0), l), 'hsl', self.__a, self.__wref)
Create a new instance based on this one but less saturated. Parameters: :level: The amount by which the color should be desaturated to produce the new one [0...1]. Returns: A grapefruit.Color instance. >>> Color.from_hsl(30, 0.5, 0.5).desaturate(0.25) Color(0.625, 0.5, 0.375, 1.0) >>> Color.from_hsl(30, 0.5, 0.5).desaturate(0.25).hsl (30.0, 0.25, 0.5)
Below is the the instruction that describes the task: ### Input: Create a new instance based on this one but less saturated. Parameters: :level: The amount by which the color should be desaturated to produce the new one [0...1]. Returns: A grapefruit.Color instance. >>> Color.from_hsl(30, 0.5, 0.5).desaturate(0.25) Color(0.625, 0.5, 0.375, 1.0) >>> Color.from_hsl(30, 0.5, 0.5).desaturate(0.25).hsl (30.0, 0.25, 0.5) ### Response: def desaturate(self, level): """Create a new instance based on this one but less saturated. Parameters: :level: The amount by which the color should be desaturated to produce the new one [0...1]. Returns: A grapefruit.Color instance. >>> Color.from_hsl(30, 0.5, 0.5).desaturate(0.25) Color(0.625, 0.5, 0.375, 1.0) >>> Color.from_hsl(30, 0.5, 0.5).desaturate(0.25).hsl (30.0, 0.25, 0.5) """ h, s, l = self.__hsl return Color((h, max(s - level, 0), l), 'hsl', self.__a, self.__wref)
def listCount(l): """returns len() of each item in a list, as a list.""" for i in range(len(l)): l[i]=len(l[i]) return l
returns len() of each item in a list, as a list.
Below is the the instruction that describes the task: ### Input: returns len() of each item in a list, as a list. ### Response: def listCount(l): """returns len() of each item in a list, as a list.""" for i in range(len(l)): l[i]=len(l[i]) return l
def _exclusively_used(self, context, hosting_device, tenant_id): """Checks if only <tenant_id>'s resources use <hosting_device>.""" return (context.session.query(hd_models.SlotAllocation).filter( hd_models.SlotAllocation.hosting_device_id == hosting_device['id'], hd_models.SlotAllocation.logical_resource_owner != tenant_id). first() is None)
Checks if only <tenant_id>'s resources use <hosting_device>.
Below is the the instruction that describes the task: ### Input: Checks if only <tenant_id>'s resources use <hosting_device>. ### Response: def _exclusively_used(self, context, hosting_device, tenant_id): """Checks if only <tenant_id>'s resources use <hosting_device>.""" return (context.session.query(hd_models.SlotAllocation).filter( hd_models.SlotAllocation.hosting_device_id == hosting_device['id'], hd_models.SlotAllocation.logical_resource_owner != tenant_id). first() is None)
def show(self, app_path, browser=None, new='tab'): ''' Opens an app in a browser window or tab. This method is useful for testing or running Bokeh server applications on a local machine but should not call when running Bokeh server for an actual deployment. Args: app_path (str) : the app path to open The part of the URL after the hostname:port, with leading slash. browser (str, optional) : browser to show with (default: None) For systems that support it, the **browser** argument allows specifying which browser to display in, e.g. "safari", "firefox", "opera", "windows-default" (see the ``webbrowser`` module documentation in the standard lib for more details). new (str, optional) : window or tab (default: "tab") If ``new`` is 'tab', then opens a new tab. If ``new`` is 'window', then opens a new window. Returns: None ''' if not app_path.startswith("/"): raise ValueError("app_path must start with a /") address_string = 'localhost' if self.address is not None and self.address != '': address_string = self.address url = "http://%s:%d%s%s" % (address_string, self.port, self.prefix, app_path) from bokeh.util.browser import view view(url, browser=browser, new=new)
Opens an app in a browser window or tab. This method is useful for testing or running Bokeh server applications on a local machine but should not call when running Bokeh server for an actual deployment. Args: app_path (str) : the app path to open The part of the URL after the hostname:port, with leading slash. browser (str, optional) : browser to show with (default: None) For systems that support it, the **browser** argument allows specifying which browser to display in, e.g. "safari", "firefox", "opera", "windows-default" (see the ``webbrowser`` module documentation in the standard lib for more details). new (str, optional) : window or tab (default: "tab") If ``new`` is 'tab', then opens a new tab. If ``new`` is 'window', then opens a new window. Returns: None
Below is the the instruction that describes the task: ### Input: Opens an app in a browser window or tab. This method is useful for testing or running Bokeh server applications on a local machine but should not call when running Bokeh server for an actual deployment. Args: app_path (str) : the app path to open The part of the URL after the hostname:port, with leading slash. browser (str, optional) : browser to show with (default: None) For systems that support it, the **browser** argument allows specifying which browser to display in, e.g. "safari", "firefox", "opera", "windows-default" (see the ``webbrowser`` module documentation in the standard lib for more details). new (str, optional) : window or tab (default: "tab") If ``new`` is 'tab', then opens a new tab. If ``new`` is 'window', then opens a new window. Returns: None ### Response: def show(self, app_path, browser=None, new='tab'): ''' Opens an app in a browser window or tab. This method is useful for testing or running Bokeh server applications on a local machine but should not call when running Bokeh server for an actual deployment. Args: app_path (str) : the app path to open The part of the URL after the hostname:port, with leading slash. browser (str, optional) : browser to show with (default: None) For systems that support it, the **browser** argument allows specifying which browser to display in, e.g. "safari", "firefox", "opera", "windows-default" (see the ``webbrowser`` module documentation in the standard lib for more details). new (str, optional) : window or tab (default: "tab") If ``new`` is 'tab', then opens a new tab. If ``new`` is 'window', then opens a new window. 
Returns: None ''' if not app_path.startswith("/"): raise ValueError("app_path must start with a /") address_string = 'localhost' if self.address is not None and self.address != '': address_string = self.address url = "http://%s:%d%s%s" % (address_string, self.port, self.prefix, app_path) from bokeh.util.browser import view view(url, browser=browser, new=new)
def fetch(self): """ Fetch a BalanceInstance :returns: Fetched BalanceInstance :rtype: twilio.rest.api.v2010.account.balance.BalanceInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return BalanceInstance(self._version, payload, account_sid=self._solution['account_sid'], )
Fetch a BalanceInstance :returns: Fetched BalanceInstance :rtype: twilio.rest.api.v2010.account.balance.BalanceInstance
Below is the the instruction that describes the task: ### Input: Fetch a BalanceInstance :returns: Fetched BalanceInstance :rtype: twilio.rest.api.v2010.account.balance.BalanceInstance ### Response: def fetch(self): """ Fetch a BalanceInstance :returns: Fetched BalanceInstance :rtype: twilio.rest.api.v2010.account.balance.BalanceInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return BalanceInstance(self._version, payload, account_sid=self._solution['account_sid'], )
def battery_charge_current(self): """ Returns current in mA """ msb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_CHARGE_CURRENT_MSB_REG) lsb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_CHARGE_CURRENT_LSB_REG) # (12 bits) charge_bin = msb << 4 | lsb & 0x0f # 0 mV -> 000h, 0.5 mA/bit FFFh -> 1800 mA return charge_bin * 0.5
Returns current in mA
Below is the the instruction that describes the task: ### Input: Returns current in mA ### Response: def battery_charge_current(self): """ Returns current in mA """ msb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_CHARGE_CURRENT_MSB_REG) lsb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_CHARGE_CURRENT_LSB_REG) # (12 bits) charge_bin = msb << 4 | lsb & 0x0f # 0 mV -> 000h, 0.5 mA/bit FFFh -> 1800 mA return charge_bin * 0.5
def _initialize(self, provide_data: List[mx.io.DataDesc], provide_label: List[mx.io.DataDesc], default_bucket_key: Tuple[int, int]) -> None: """ Initializes model components, creates scoring symbol and module, and binds it. :param provide_data: List of data descriptors. :param provide_label: List of label descriptors. :param default_bucket_key: The default maximum (source, target) lengths. """ source = mx.sym.Variable(C.SOURCE_NAME) source_words = source.split(num_outputs=self.config.config_embed_source.num_factors, axis=2, squeeze_axis=True)[0] source_length = utils.compute_lengths(source_words) target = mx.sym.Variable(C.TARGET_NAME) target_length = utils.compute_lengths(target) # labels shape: (batch_size, target_length) (usually the maximum target sequence length) labels = mx.sym.Variable(C.TARGET_LABEL_NAME) data_names = [C.SOURCE_NAME, C.TARGET_NAME] label_names = [C.TARGET_LABEL_NAME] # check provide_{data,label} names provide_data_names = [d[0] for d in provide_data] utils.check_condition(provide_data_names == data_names, "incompatible provide_data: %s, names should be %s" % (provide_data_names, data_names)) provide_label_names = [d[0] for d in provide_label] utils.check_condition(provide_label_names == label_names, "incompatible provide_label: %s, names should be %s" % (provide_label_names, label_names)) def sym_gen(seq_lens): """ Returns a (grouped) symbol containing the summed score for each sentence, as well as the entire target distributions for each word. Also returns data and label names for the BucketingModule. 
""" source_seq_len, target_seq_len = seq_lens # source embedding (source_embed, source_embed_length, source_embed_seq_len) = self.embedding_source.encode(source, source_length, source_seq_len) # target embedding (target_embed, target_embed_length, target_embed_seq_len) = self.embedding_target.encode(target, target_length, target_seq_len) # encoder # source_encoded: (batch_size, source_encoded_length, encoder_depth) (source_encoded, source_encoded_length, source_encoded_seq_len) = self.encoder.encode(source_embed, source_embed_length, source_embed_seq_len) # decoder # target_decoded: (batch-size, target_len, decoder_depth) target_decoded = self.decoder.decode_sequence(source_encoded, source_encoded_length, source_encoded_seq_len, target_embed, target_embed_length, target_embed_seq_len) # output layer # logits: (batch_size * target_seq_len, target_vocab_size) logits = self.output_layer(mx.sym.reshape(data=target_decoded, shape=(-3, 0))) # logits after reshape: (batch_size, target_seq_len, target_vocab_size) logits = mx.sym.reshape(data=logits, shape=(-4, -1, target_embed_seq_len, 0)) if self.softmax_temperature is not None: logits = logits / self.softmax_temperature # Compute the softmax along the final dimension. # target_dists: (batch_size, target_seq_len, target_vocab_size) target_dists = mx.sym.softmax(data=logits, axis=2, name=C.SOFTMAX_NAME) # Select the label probability, then take their logs. # probs and scores: (batch_size, target_seq_len) probs = mx.sym.pick(target_dists, labels) scores = mx.sym.log(probs) if self.score_type == C.SCORING_TYPE_NEGLOGPROB: scores = -1 * scores # Sum, then apply length penalty. The call to `mx.sym.where` masks out invalid values from scores. 
# zeros and sums: (batch_size,) zeros = mx.sym.zeros_like(scores) sums = mx.sym.sum(mx.sym.where(labels != 0, scores, zeros), axis=1) / (self.length_penalty(target_length - 1)) # Deal with the potential presence of brevity penalty # length_ratio: (batch_size,) if self.constant_length_ratio > 0.0: # override all ratios with the constant value length_ratio = self.constant_length_ratio * mx.sym.ones_like(sums) else: # predict length ratio if supported length_ratio = self.length_ratio(source_encoded, source_encoded_length).reshape((-1,)) \ if self.length_ratio is not None else mx.sym.zeros_like(sums) sums = sums - self.brevity_penalty(target_length - 1, length_ratio * source_encoded_length) # Return the sums and the target distributions # sums: (batch_size,) target_dists: (batch_size, target_seq_len, target_vocab_size) return mx.sym.Group([sums, target_dists]), data_names, label_names symbol, _, __ = sym_gen(default_bucket_key) self.module = mx.mod.Module(symbol=symbol, data_names=data_names, label_names=label_names, logger=logger, context=self.context) self.module.bind(data_shapes=provide_data, label_shapes=provide_label, for_training=False, force_rebind=False, grad_req='null')
Initializes model components, creates scoring symbol and module, and binds it. :param provide_data: List of data descriptors. :param provide_label: List of label descriptors. :param default_bucket_key: The default maximum (source, target) lengths.
Below is the the instruction that describes the task: ### Input: Initializes model components, creates scoring symbol and module, and binds it. :param provide_data: List of data descriptors. :param provide_label: List of label descriptors. :param default_bucket_key: The default maximum (source, target) lengths. ### Response: def _initialize(self, provide_data: List[mx.io.DataDesc], provide_label: List[mx.io.DataDesc], default_bucket_key: Tuple[int, int]) -> None: """ Initializes model components, creates scoring symbol and module, and binds it. :param provide_data: List of data descriptors. :param provide_label: List of label descriptors. :param default_bucket_key: The default maximum (source, target) lengths. """ source = mx.sym.Variable(C.SOURCE_NAME) source_words = source.split(num_outputs=self.config.config_embed_source.num_factors, axis=2, squeeze_axis=True)[0] source_length = utils.compute_lengths(source_words) target = mx.sym.Variable(C.TARGET_NAME) target_length = utils.compute_lengths(target) # labels shape: (batch_size, target_length) (usually the maximum target sequence length) labels = mx.sym.Variable(C.TARGET_LABEL_NAME) data_names = [C.SOURCE_NAME, C.TARGET_NAME] label_names = [C.TARGET_LABEL_NAME] # check provide_{data,label} names provide_data_names = [d[0] for d in provide_data] utils.check_condition(provide_data_names == data_names, "incompatible provide_data: %s, names should be %s" % (provide_data_names, data_names)) provide_label_names = [d[0] for d in provide_label] utils.check_condition(provide_label_names == label_names, "incompatible provide_label: %s, names should be %s" % (provide_label_names, label_names)) def sym_gen(seq_lens): """ Returns a (grouped) symbol containing the summed score for each sentence, as well as the entire target distributions for each word. Also returns data and label names for the BucketingModule. 
""" source_seq_len, target_seq_len = seq_lens # source embedding (source_embed, source_embed_length, source_embed_seq_len) = self.embedding_source.encode(source, source_length, source_seq_len) # target embedding (target_embed, target_embed_length, target_embed_seq_len) = self.embedding_target.encode(target, target_length, target_seq_len) # encoder # source_encoded: (batch_size, source_encoded_length, encoder_depth) (source_encoded, source_encoded_length, source_encoded_seq_len) = self.encoder.encode(source_embed, source_embed_length, source_embed_seq_len) # decoder # target_decoded: (batch-size, target_len, decoder_depth) target_decoded = self.decoder.decode_sequence(source_encoded, source_encoded_length, source_encoded_seq_len, target_embed, target_embed_length, target_embed_seq_len) # output layer # logits: (batch_size * target_seq_len, target_vocab_size) logits = self.output_layer(mx.sym.reshape(data=target_decoded, shape=(-3, 0))) # logits after reshape: (batch_size, target_seq_len, target_vocab_size) logits = mx.sym.reshape(data=logits, shape=(-4, -1, target_embed_seq_len, 0)) if self.softmax_temperature is not None: logits = logits / self.softmax_temperature # Compute the softmax along the final dimension. # target_dists: (batch_size, target_seq_len, target_vocab_size) target_dists = mx.sym.softmax(data=logits, axis=2, name=C.SOFTMAX_NAME) # Select the label probability, then take their logs. # probs and scores: (batch_size, target_seq_len) probs = mx.sym.pick(target_dists, labels) scores = mx.sym.log(probs) if self.score_type == C.SCORING_TYPE_NEGLOGPROB: scores = -1 * scores # Sum, then apply length penalty. The call to `mx.sym.where` masks out invalid values from scores. 
# zeros and sums: (batch_size,) zeros = mx.sym.zeros_like(scores) sums = mx.sym.sum(mx.sym.where(labels != 0, scores, zeros), axis=1) / (self.length_penalty(target_length - 1)) # Deal with the potential presence of brevity penalty # length_ratio: (batch_size,) if self.constant_length_ratio > 0.0: # override all ratios with the constant value length_ratio = self.constant_length_ratio * mx.sym.ones_like(sums) else: # predict length ratio if supported length_ratio = self.length_ratio(source_encoded, source_encoded_length).reshape((-1,)) \ if self.length_ratio is not None else mx.sym.zeros_like(sums) sums = sums - self.brevity_penalty(target_length - 1, length_ratio * source_encoded_length) # Return the sums and the target distributions # sums: (batch_size,) target_dists: (batch_size, target_seq_len, target_vocab_size) return mx.sym.Group([sums, target_dists]), data_names, label_names symbol, _, __ = sym_gen(default_bucket_key) self.module = mx.mod.Module(symbol=symbol, data_names=data_names, label_names=label_names, logger=logger, context=self.context) self.module.bind(data_shapes=provide_data, label_shapes=provide_label, for_training=False, force_rebind=False, grad_req='null')
def wrap_message(self, message): """ Cryptographically signs and optionally encrypts the supplied message. The message is only encrypted if 'confidentiality' was negotiated, otherwise the message is left untouched. :return: A tuple containing the message signature and the optionally encrypted message """ if not self.is_established: raise Exception("Context has not been established") if self._wrapper is None: raise Exception("Neither sealing or signing have been negotiated") else: return self._wrapper.wrap(message)
Cryptographically signs and optionally encrypts the supplied message. The message is only encrypted if 'confidentiality' was negotiated, otherwise the message is left untouched. :return: A tuple containing the message signature and the optionally encrypted message
Below is the the instruction that describes the task: ### Input: Cryptographically signs and optionally encrypts the supplied message. The message is only encrypted if 'confidentiality' was negotiated, otherwise the message is left untouched. :return: A tuple containing the message signature and the optionally encrypted message ### Response: def wrap_message(self, message): """ Cryptographically signs and optionally encrypts the supplied message. The message is only encrypted if 'confidentiality' was negotiated, otherwise the message is left untouched. :return: A tuple containing the message signature and the optionally encrypted message """ if not self.is_established: raise Exception("Context has not been established") if self._wrapper is None: raise Exception("Neither sealing or signing have been negotiated") else: return self._wrapper.wrap(message)
def declare_type(self, type_): """ Declares a type. Checks its name is not already used in the current scope, and that it's not a basic type. Returns the given type_ Symbol, or None on error. """ assert isinstance(type_, symbols.TYPE) # Checks it's not a basic type if not type_.is_basic and type_.name.lower() in TYPE.TYPE_NAMES.values(): syntax_error(type_.lineno, "'%s' is a basic type and cannot be redefined" % type_.name) return None if not self.check_is_undeclared(type_.name, type_.lineno, scope=self.current_scope, show_error=True): return None entry = self.declare(type_.name, type_.lineno, type_) return entry
Declares a type. Checks its name is not already used in the current scope, and that it's not a basic type. Returns the given type_ Symbol, or None on error.
Below is the the instruction that describes the task: ### Input: Declares a type. Checks its name is not already used in the current scope, and that it's not a basic type. Returns the given type_ Symbol, or None on error. ### Response: def declare_type(self, type_): """ Declares a type. Checks its name is not already used in the current scope, and that it's not a basic type. Returns the given type_ Symbol, or None on error. """ assert isinstance(type_, symbols.TYPE) # Checks it's not a basic type if not type_.is_basic and type_.name.lower() in TYPE.TYPE_NAMES.values(): syntax_error(type_.lineno, "'%s' is a basic type and cannot be redefined" % type_.name) return None if not self.check_is_undeclared(type_.name, type_.lineno, scope=self.current_scope, show_error=True): return None entry = self.declare(type_.name, type_.lineno, type_) return entry
def do_bm(self, arg): """ [~process] bm <address-address> - set memory breakpoint """ pid = self.get_process_id_from_prefix() if not self.debug.is_debugee(pid): raise CmdError("target process is not being debugged") process = self.get_process(pid) token_list = self.split_tokens(arg, 1, 2) address, size = self.input_address_range(token_list[0], pid) self.debug.watch_buffer(pid, address, size)
[~process] bm <address-address> - set memory breakpoint
Below is the the instruction that describes the task: ### Input: [~process] bm <address-address> - set memory breakpoint ### Response: def do_bm(self, arg): """ [~process] bm <address-address> - set memory breakpoint """ pid = self.get_process_id_from_prefix() if not self.debug.is_debugee(pid): raise CmdError("target process is not being debugged") process = self.get_process(pid) token_list = self.split_tokens(arg, 1, 2) address, size = self.input_address_range(token_list[0], pid) self.debug.watch_buffer(pid, address, size)
def destroy_elb(app='', env='dev', region='us-east-1', **_): """Destroy ELB Resources. Args: app (str): Spinnaker Application name. env (str): Deployment environment. region (str): AWS region. Returns: True upon successful completion. """ task_json = get_template( template_file='destroy/destroy_elb.json.j2', app=app, env=env, region=region, vpc=get_vpc_id(account=env, region=region)) wait_for_task(task_json) return True
Destroy ELB Resources. Args: app (str): Spinnaker Application name. env (str): Deployment environment. region (str): AWS region. Returns: True upon successful completion.
Below is the the instruction that describes the task: ### Input: Destroy ELB Resources. Args: app (str): Spinnaker Application name. env (str): Deployment environment. region (str): AWS region. Returns: True upon successful completion. ### Response: def destroy_elb(app='', env='dev', region='us-east-1', **_): """Destroy ELB Resources. Args: app (str): Spinnaker Application name. env (str): Deployment environment. region (str): AWS region. Returns: True upon successful completion. """ task_json = get_template( template_file='destroy/destroy_elb.json.j2', app=app, env=env, region=region, vpc=get_vpc_id(account=env, region=region)) wait_for_task(task_json) return True
def make_parse_err(self, err, reformat=True, include_ln=True): """Make a CoconutParseError from a ParseBaseException.""" err_line = err.line err_index = err.col - 1 err_lineno = err.lineno if include_ln else None if reformat: err_line, err_index = self.reformat(err_line, err_index) if err_lineno is not None: err_lineno = self.adjust(err_lineno) return CoconutParseError(None, err_line, err_index, err_lineno)
Make a CoconutParseError from a ParseBaseException.
Below is the the instruction that describes the task: ### Input: Make a CoconutParseError from a ParseBaseException. ### Response: def make_parse_err(self, err, reformat=True, include_ln=True): """Make a CoconutParseError from a ParseBaseException.""" err_line = err.line err_index = err.col - 1 err_lineno = err.lineno if include_ln else None if reformat: err_line, err_index = self.reformat(err_line, err_index) if err_lineno is not None: err_lineno = self.adjust(err_lineno) return CoconutParseError(None, err_line, err_index, err_lineno)
def process_resource(self, req, resp, resource): """ Process the request after routing. The resource is required to determine if a model based resource is being used for validations so skip processing if no `resource.model` attribute is present. """ try: model = resource.model req.fields = fields.init(req, model) req.filters = filters.init(req, model) req.includes = includes.init(req, model) req.pages = pages.init(req, model) req.sorts = sorts.init(req, model) except AttributeError: pass except InvalidQueryParams as exc: abort(exc)
Process the request after routing. The resource is required to determine if a model based resource is being used for validations so skip processing if no `resource.model` attribute is present.
Below is the the instruction that describes the task: ### Input: Process the request after routing. The resource is required to determine if a model based resource is being used for validations so skip processing if no `resource.model` attribute is present. ### Response: def process_resource(self, req, resp, resource): """ Process the request after routing. The resource is required to determine if a model based resource is being used for validations so skip processing if no `resource.model` attribute is present. """ try: model = resource.model req.fields = fields.init(req, model) req.filters = filters.init(req, model) req.includes = includes.init(req, model) req.pages = pages.init(req, model) req.sorts = sorts.init(req, model) except AttributeError: pass except InvalidQueryParams as exc: abort(exc)
def get_project_files(): """Retrieve a list of project files, ignoring hidden files. :return: sorted list of project files :rtype: :class:`list` """ if is_git_project(): return get_git_project_files() project_files = [] for top, subdirs, files in os.walk('.'): for subdir in subdirs: if subdir.startswith('.'): subdirs.remove(subdir) for f in files: if f.startswith('.'): continue project_files.append(os.path.join(top, f)) return project_files
Retrieve a list of project files, ignoring hidden files. :return: sorted list of project files :rtype: :class:`list`
Below is the the instruction that describes the task: ### Input: Retrieve a list of project files, ignoring hidden files. :return: sorted list of project files :rtype: :class:`list` ### Response: def get_project_files(): """Retrieve a list of project files, ignoring hidden files. :return: sorted list of project files :rtype: :class:`list` """ if is_git_project(): return get_git_project_files() project_files = [] for top, subdirs, files in os.walk('.'): for subdir in subdirs: if subdir.startswith('.'): subdirs.remove(subdir) for f in files: if f.startswith('.'): continue project_files.append(os.path.join(top, f)) return project_files
def _propagated_record(self, rdtype, name, content, nameservers=None): """ If the publicly propagation check should be done, waits until the domain nameservers responses with the propagated record type, name & content and returns a boolean, if the publicly propagation was successful or not. """ latency = self._get_provider_option('latency') propagated = self._get_provider_option('propagated') if propagated == 'yes': retry, max_retry = 0, 20 while retry < max_retry: for rdata in Provider._dns_lookup(name, rdtype, nameservers): if content == rdata.to_text(): LOGGER.info('Hetzner => Record %s has %s %s', name, rdtype, content) return True retry += 1 retry_log = (', retry ({}/{}) in {}s...'.format((retry + 1), max_retry, latency) if retry < max_retry else '') LOGGER.info('Hetzner => Record is not propagated%s', retry_log) time.sleep(latency) return False
If the publicly propagation check should be done, waits until the domain nameservers responses with the propagated record type, name & content and returns a boolean, if the publicly propagation was successful or not.
Below is the the instruction that describes the task: ### Input: If the publicly propagation check should be done, waits until the domain nameservers responses with the propagated record type, name & content and returns a boolean, if the publicly propagation was successful or not. ### Response: def _propagated_record(self, rdtype, name, content, nameservers=None): """ If the publicly propagation check should be done, waits until the domain nameservers responses with the propagated record type, name & content and returns a boolean, if the publicly propagation was successful or not. """ latency = self._get_provider_option('latency') propagated = self._get_provider_option('propagated') if propagated == 'yes': retry, max_retry = 0, 20 while retry < max_retry: for rdata in Provider._dns_lookup(name, rdtype, nameservers): if content == rdata.to_text(): LOGGER.info('Hetzner => Record %s has %s %s', name, rdtype, content) return True retry += 1 retry_log = (', retry ({}/{}) in {}s...'.format((retry + 1), max_retry, latency) if retry < max_retry else '') LOGGER.info('Hetzner => Record is not propagated%s', retry_log) time.sleep(latency) return False
def atoms_order(self): """ Morgan like algorithm for graph nodes ordering :return: dict of atom-weight pairs """ if not len(self): # for empty containers return {} elif len(self) == 1: # optimize single atom containers return dict.fromkeys(self, 2) params = {n: (int(node), tuple(sorted(int(edge) for edge in self._adj[n].values()))) for n, node in self.atoms()} newlevels = {} countprime = iter(primes) weights = {x: newlevels.get(y) or newlevels.setdefault(y, next(countprime)) for x, y in sorted(params.items(), key=itemgetter(1))} tries = len(self) * 4 numb = len(set(weights.values())) stab = 0 while tries: oldnumb = numb neweights = {} countprime = iter(primes) # weights[n] ** 2 NEED for differentiation of molecules like A-B or any other complete graphs. tmp = {n: reduce(mul, (weights[x] for x in m), weights[n] ** 2) for n, m in self._adj.items()} weights = {x: (neweights.get(y) or neweights.setdefault(y, next(countprime))) for x, y in sorted(tmp.items(), key=itemgetter(1))} numb = len(set(weights.values())) if numb == len(self): # each atom now unique break elif numb == oldnumb: x = Counter(weights.values()) if x[min(x)] > 1: if stab == 3: break elif stab >= 2: break stab += 1 elif stab: stab = 0 tries -= 1 if not tries and numb < oldnumb: warning('morgan. number of attempts exceeded. uniqueness has decreased. next attempt will be made') tries = 1 else: warning('morgan. number of attempts exceeded') return weights
Morgan like algorithm for graph nodes ordering :return: dict of atom-weight pairs
Below is the the instruction that describes the task: ### Input: Morgan like algorithm for graph nodes ordering :return: dict of atom-weight pairs ### Response: def atoms_order(self): """ Morgan like algorithm for graph nodes ordering :return: dict of atom-weight pairs """ if not len(self): # for empty containers return {} elif len(self) == 1: # optimize single atom containers return dict.fromkeys(self, 2) params = {n: (int(node), tuple(sorted(int(edge) for edge in self._adj[n].values()))) for n, node in self.atoms()} newlevels = {} countprime = iter(primes) weights = {x: newlevels.get(y) or newlevels.setdefault(y, next(countprime)) for x, y in sorted(params.items(), key=itemgetter(1))} tries = len(self) * 4 numb = len(set(weights.values())) stab = 0 while tries: oldnumb = numb neweights = {} countprime = iter(primes) # weights[n] ** 2 NEED for differentiation of molecules like A-B or any other complete graphs. tmp = {n: reduce(mul, (weights[x] for x in m), weights[n] ** 2) for n, m in self._adj.items()} weights = {x: (neweights.get(y) or neweights.setdefault(y, next(countprime))) for x, y in sorted(tmp.items(), key=itemgetter(1))} numb = len(set(weights.values())) if numb == len(self): # each atom now unique break elif numb == oldnumb: x = Counter(weights.values()) if x[min(x)] > 1: if stab == 3: break elif stab >= 2: break stab += 1 elif stab: stab = 0 tries -= 1 if not tries and numb < oldnumb: warning('morgan. number of attempts exceeded. uniqueness has decreased. next attempt will be made') tries = 1 else: warning('morgan. number of attempts exceeded') return weights
def eaSimpleConverge(population, toolbox, cxpb, mutpb, ngen, stats=None,
                     halloffame=None, callback=None, verbose=True):
    """This algorithm reproduces the simplest evolutionary algorithm as
    presented in chapter 7 of [Back2000]_.

    Modified to allow checking if there is no change for ngen, as a simple
    rule for convergence. Interface is similar to eaSimple(). However, in
    eaSimple, ngen is total number of iterations; in eaSimpleConverge, we
    terminate only when the best is NOT updated for ngen iterations.

    :param population: list of DEAP individuals
    :param toolbox: DEAP toolbox providing map/evaluate/select/mate/mutate
    :param cxpb: crossover probability
    :param mutpb: mutation probability
    :param ngen: number of consecutive non-improving generations that stops evolution
    :param stats: DEAP Statistics object; must compile a 'max' field (required)
    :param halloffame: optional HallOfFame updated each generation
    :param callback: optional callable invoked as callback(best_individual, gen)
    :param verbose: when True, print progress to stderr every 20 generations
    :return: the final population
    """
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    if halloffame is not None:
        halloffame.update(population)
    record = stats.compile(population) if stats else {}

    # Begin the generational process
    gen = 1
    best = (0,)
    # BUGFIX: 'updated' was previously assigned only when the best score
    # improved; if it never improved, the stop test below raised
    # UnboundLocalError. Initialize it so the run terminates after ngen
    # generations in that case.
    updated = 0
    while True:
        # Select the next generation individuals
        offspring = toolbox.select(population, len(population))

        # Vary the pool of individuals
        offspring = varAnd(offspring, toolbox, cxpb, mutpb)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Update the hall of fame with the generated individuals
        if halloffame is not None:
            halloffame.update(offspring)

        if callback is not None:
            callback(halloffame[0], gen)

        # Replace the current population by the offspring
        population[:] = offspring

        # Append the current generation statistics to the logbook
        # NOTE(review): requires stats to provide a 'max' field — with
        # stats=None this raises KeyError, as in the original.
        record = stats.compile(population) if stats else {}
        current_best = record['max']
        if gen % 20 == 0 and verbose:
            print("Current iteration {0}: max_score={1}".
                  format(gen, current_best), file=sys.stderr)

        if current_best > best:
            best = current_best
            updated = gen

        gen += 1
        # Converged: the best score has not improved for ngen generations.
        if gen - updated > ngen:
            break

    return population
This algorithm reproduces the simplest evolutionary algorithm as presented in chapter 7 of [Back2000]_. Modified to allow checking if there is no change for ngen, as a simple rule for convergence. Interface is similar to eaSimple(). However, in eaSimple, ngen is total number of iterations; in eaSimpleConverge, we terminate only when the best is NOT updated for ngen iterations.
Below is the the instruction that describes the task: ### Input: This algorithm reproduce the simplest evolutionary algorithm as presented in chapter 7 of [Back2000]_. Modified to allow checking if there is no change for ngen, as a simple rule for convergence. Interface is similar to eaSimple(). However, in eaSimple, ngen is total number of iterations; in eaSimpleConverge, we terminate only when the best is NOT updated for ngen iterations. ### Response: def eaSimpleConverge(population, toolbox, cxpb, mutpb, ngen, stats=None, halloffame=None, callback=None, verbose=True): """This algorithm reproduce the simplest evolutionary algorithm as presented in chapter 7 of [Back2000]_. Modified to allow checking if there is no change for ngen, as a simple rule for convergence. Interface is similar to eaSimple(). However, in eaSimple, ngen is total number of iterations; in eaSimpleConverge, we terminate only when the best is NOT updated for ngen iterations. """ # Evaluate the individuals with an invalid fitness invalid_ind = [ind for ind in population if not ind.fitness.valid] fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) for ind, fit in zip(invalid_ind, fitnesses): ind.fitness.values = fit if halloffame is not None: halloffame.update(population) record = stats.compile(population) if stats else {} # Begin the generational process gen = 1 best = (0,) while True: # Select the next generation individuals offspring = toolbox.select(population, len(population)) # Vary the pool of individuals offspring = varAnd(offspring, toolbox, cxpb, mutpb) # Evaluate the individuals with an invalid fitness invalid_ind = [ind for ind in offspring if not ind.fitness.valid] fitnesses = toolbox.map(toolbox.evaluate, invalid_ind) for ind, fit in zip(invalid_ind, fitnesses): ind.fitness.values = fit # Update the hall of fame with the generated individuals if halloffame is not None: halloffame.update(offspring) if callback is not None: callback(halloffame[0], gen) # Replace the current 
population by the offspring population[:] = offspring # Append the current generation statistics to the logbook record = stats.compile(population) if stats else {} current_best = record['max'] if gen % 20 == 0 and verbose: print("Current iteration {0}: max_score={1}". format(gen, current_best), file=sys.stderr) if current_best > best: best = current_best updated = gen gen += 1 if gen - updated > ngen: break return population
def rotateAboutVectorMatrix(vec, theta_deg):
    """Construct the matrix that rotates a vector about vector vec by
    an angle of theta_deg degrees.

    Uses the axis-angle (Rodrigues) form:
    http://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle

    Input:
    vec          3-element array-like rotation axis; need not be normalized.
                 The input is NOT modified.
    theta_deg    (float) Angle through which vectors should be rotated in degrees

    Returns:
    A 3x3 rotation matrix.

    To rotate a vector, premultiply by this matrix.
    To rotate the coord sys underneath the vector, post multiply.
    """
    ct = np.cos(np.radians(theta_deg))
    st = np.sin(np.radians(theta_deg))

    # BUGFIX: the original divided 'vec' in place (vec /= norm), silently
    # mutating the caller's array and failing on integer arrays. Normalize
    # a float copy instead.
    vec = np.asarray(vec, dtype=float)
    vec = vec / np.linalg.norm(vec)
    assert np.all(np.isfinite(vec))

    # Rodrigues formula: R = ct*I + st*[u]_x + (1 - ct) * (u u^T)
    term1 = ct * np.eye(3)

    # Cross-product (skew-symmetric) matrix of the unit axis
    ucross = np.array([[0.0, -vec[2], vec[1]],
                       [vec[2], 0.0, -vec[0]],
                       [-vec[1], vec[0], 0.0]])
    term2 = st * ucross

    # Symmetric outer product replaces the original hand-rolled double loop
    term3 = (1 - ct) * np.outer(vec, vec)

    return term1 + term2 + term3
Construct the matrix that rotates vector a about vector vec by an angle of theta_deg degrees Taken from http://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle Input: theta_deg (float) Angle through which vectors should be rotated in degrees Returns: A matrix To rotate a vector, premultiply by this matrix. To rotate the coord sys underneath the vector, post multiply
Below is the the instruction that describes the task: ### Input: Construct the matrix that rotates vector a about vector vec by an angle of theta_deg degrees Taken from http://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle Input: theta_deg (float) Angle through which vectors should be rotated in degrees Returns: A matrix To rotate a vector, premultiply by this matrix. To rotate the coord sys underneath the vector, post multiply ### Response: def rotateAboutVectorMatrix(vec, theta_deg): """Construct the matrix that rotates vector a about vector vec by an angle of theta_deg degrees Taken from http://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle Input: theta_deg (float) Angle through which vectors should be rotated in degrees Returns: A matrix To rotate a vector, premultiply by this matrix. To rotate the coord sys underneath the vector, post multiply """ ct = np.cos(np.radians(theta_deg)) st = np.sin(np.radians(theta_deg)) # Ensure vector has normal length vec /= np.linalg.norm(vec) assert( np.all( np.isfinite(vec))) # compute the three terms term1 = ct * np.eye(3) ucross = np.zeros( (3,3)) ucross[0] = [0, -vec[2], vec[1]] ucross[1] = [vec[2], 0, -vec[0]] ucross[2] = [-vec[1], vec[0], 0] term2 = st*ucross ufunny = np.zeros( (3,3)) for i in range(0,3): for j in range(i,3): ufunny[i,j] = vec[i]*vec[j] ufunny[j,i] = ufunny[i,j] term3 = (1-ct) * ufunny return term1 + term2 + term3