code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def clean_up(self):
    """Release the socket and close every capture handler's log file."""
    self.socket.close()
    for handler in self.capture_handlers:
        handler['logger'].close()
Clean up the socket and log file handles.
def updateSquareFees(paymentRecord):
    """Copy the net fees from a Square payment record onto its invoice.

    Saves the invoice, re-allocates its fees, and returns the fee amount.
    """
    net = paymentRecord.netFees
    inv = paymentRecord.invoice
    inv.fees = net
    inv.save()
    inv.allocateFees()
    return net
The Square Checkout API does not calculate fees immediately, so this task is called to be asynchronously run 1 minute after the initial transaction, so that any Invoice or ExpenseItem associated with this transaction also remains accurate.
def get_http_rtt(
    url: str,
    samples: int = 3,
    method: str = 'head',
    timeout: int = 1,
) -> Optional[float]:
    """Average HTTP round-trip time to ``url`` over ``samples`` requests.

    Returns ``None`` when any request fails (unreachable server or an
    unexpected error, which is printed).
    """
    times = []
    for _ in range(samples):
        try:
            response = requests.request(method, url, timeout=timeout)
        except (RequestException, OSError):
            return None
        except Exception as ex:  # NOTE(review): broad catch kept from original
            print(ex)
            return None
        times.append(response.elapsed.total_seconds())
        # Brief pause between samples to avoid hammering the server.
        sleep(.125)
    return sum(times) / samples
Determine the average HTTP RTT to `url` over the number of `samples`. Returns `None` if the server is unreachable.
def mk_dx_dy_from_geotif_layer(geotif):
    """Extract per-cell x and y distances (in meters) from a geotiff layer.

    Presently only supports WGS-84 files.  Measures the geodesic distance
    between neighbouring grid coordinates and returns two numpy arrays
    (dX, dY), each one element shorter than the coordinate axis.
    """
    ELLIPSOID_MAP = {'WGS84': 'WGS-84'}
    ellipsoid = ELLIPSOID_MAP[geotif.grid_coordinates.wkt]
    d = distance(ellipsoid=ellipsoid)
    dx = geotif.grid_coordinates.x_axis
    dy = geotif.grid_coordinates.y_axis
    dX = np.zeros((dy.shape[0] - 1))
    # Bug fix: xrange is Python 2 only (NameError on Python 3); range works on both.
    for j in range(len(dX)):
        # Distance between adjacent x grid points measured at latitude dy[j+1].
        dX[j] = d.measure((dy[j + 1], dx[1]), (dy[j + 1], dx[0])) * 1000
    dY = np.zeros((dy.shape[0] - 1))
    for i in range(len(dY)):
        dY[i] = d.measure((dy[i], 0), (dy[i + 1], 0)) * 1000
    return dX, dY
Extracts the change in x and y coordinates from the geotiff file. Presently only supports WGS-84 files.
def _set_request_auth_type_metric(self, request):
    """Record a 'request_auth_type' custom metric guessing the auth type.

    NOTE: this is a best guess.  Possible values: a lowercased token type
    (jwt/bearer/...), 'other-token-type', 'no-user', 'unauthenticated',
    or the catch-all 'session-or-unknown'.
    """
    auth_header = request.META.get('HTTP_AUTHORIZATION')
    if auth_header:
        parts = auth_header.split()
        auth_type = parts[0].lower() if len(parts) == 2 else 'other-token-type'
    elif not getattr(request, 'user', None):
        auth_type = 'no-user'
    elif not request.user.is_authenticated:
        auth_type = 'unauthenticated'
    else:
        auth_type = 'session-or-unknown'
    monitoring.set_custom_metric('request_auth_type', auth_type)
Add metric 'request_auth_type' for the authentication type used. NOTE: This is a best guess at this point. Possible values include: no-user unauthenticated jwt/bearer/other-token-type session-or-unknown (catch all)
def server_console_output(request, instance_id, tail_length=None):
    """Fetch the console output of an instance via the Nova client."""
    client = _nova.novaclient(request)
    return client.servers.get_console_output(instance_id, length=tail_length)
Gets console output of an instance.
def list_from_metadata(cls, url, metadata):
    """Return a list of DatalakeRecords for ``url`` and ``metadata`` — one
    record per time bucket the metadata spans."""
    key = cls._get_key(url)
    meta = Metadata(**metadata)
    create_time = cls._get_create_time(key)
    buckets = cls.get_time_buckets_from_metadata(meta)
    return [cls(url, meta, bucket, create_time, key.size) for bucket in buckets]
return a list of DatalakeRecords for the url and metadata
def format_percent_field(__, prec, number, locale):
    """Format ``number`` as a locale-aware percent with ``prec`` decimal
    digits (falling back to PERCENT_DECIMAL_DIGITS when prec is None)."""
    digits = PERCENT_DECIMAL_DIGITS if prec is None else int(prec)
    loc = Locale.parse(locale)
    pattern = loc.percent_formats.get(None)
    return pattern.apply(number, loc, force_frac=(digits, digits))
Formats a percent field.
def get_name(principal):
    """Get the normalized account name for the given principal.

    ``principal`` may be a PySID object, a SID string, or an account name
    in any capitalization.  Raises CommandExecutionError when the SID
    cannot be resolved.
    """
    if isinstance(principal, pywintypes.SIDType):
        sid_obj = principal
    else:
        if principal is None:
            # S-1-0-0 is the Null SID; used as a stand-in for None.
            principal = 'S-1-0-0'
        try:
            # First try to interpret the string as a SID.
            sid_obj = win32security.ConvertStringSidToSid(principal)
        except pywintypes.error:
            try:
                # Fall back to a name lookup (can be slow on large AD domains).
                sid_obj = win32security.LookupAccountName(None, principal)[0]
            except pywintypes.error:
                # Let LookupAccountSid below report the failure.
                sid_obj = principal
    try:
        return win32security.LookupAccountSid(None, sid_obj)[0]
    except (pywintypes.error, TypeError) as exc:
        message = 'Error resolving "{0}"'.format(principal)
        if type(exc) == pywintypes.error:
            # Append the human-readable Windows error text.
            win_error = win32api.FormatMessage(exc.winerror).rstrip('\n')
            message = '{0}: {1}'.format(message, win_error)
        log.exception(message)
        raise CommandExecutionError(message, exc)
Gets the name from the specified principal. Args: principal (str): Find the Normalized name based on this. Can be a PySID object, a SID string, or a user name in any capitalization. .. note:: Searching based on the user name can be slow on hosts connected to large Active Directory domains. Returns: str: The name that corresponds to the passed principal Usage: .. code-block:: python salt.utils.win_dacl.get_name('S-1-5-32-544') salt.utils.win_dacl.get_name('adminisTrators')
def areBackupsDegraded(self):
    """Return the ids of backup instances currently considered degraded.

    Uses the accuracy monitor's degradation check when one is configured,
    otherwise falls back to the throughput heuristic.
    """
    if self.acc_monitor:
        is_degraded = self.acc_monitor.is_instance_degraded
    else:
        is_degraded = self.is_instance_throughput_too_low
    return [inst for inst in self.instances.backupIds if is_degraded(inst)]
Return the list of backup instance ids that are considered slow (degraded).
async def update(self, **kwargs):
    """Handle PUT with a resource identifier: update a single document.

    Raises NotFound when no object matches the given pk, and BadRequest
    on any other failure.
    """
    try:
        self.data[self.pk] = self.pk_type(kwargs['pk'])
        updated_obj = await self._meta.object_class().update(self.db, data=self.data)
        if updated_obj is None:
            raise NotFound('Object matching the given {} was not found'.format(self.pk))
        return await updated_obj.serialize()
    except NotFound:
        # Bug fix: the broad handler below used to swallow NotFound and
        # re-raise it as BadRequest, turning a 404 into a 400.
        raise
    except Exception as ex:
        logger.exception(ex)
        raise BadRequest(ex)
Corresponds to PUT request with a resource identifier, updating a single document in the database
def replace_nones(list_, repl=-1):
    r"""Recursively replace every None in ``list_`` (and nested lists) with ``repl``.

    Args:
        list_ (list): input list, possibly containing nested lists.
        repl (obj): replacement value for None entries.

    Returns:
        list: a new list with every None replaced.

    Example:
        >>> replace_nones([None, 0, 1, 2])
        [-1, 0, 1, 2]
    """
    # Bug fix: the original body contained a stray bare `r` token (a remnant
    # of a mangled r-docstring) that raised NameError at runtime.
    return [
        repl if item is None else (
            replace_nones(item, repl) if isinstance(item, list) else item
        )
        for item in list_
    ]
r""" Recursively removes Nones in all lists and sublists and replaces them with the repl variable Args: list_ (list): repl (obj): replacement value Returns: list CommandLine: python -m utool.util_list --test-replace_nones Example: >>> # ENABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> # build test data >>> list_ = [None, 0, 1, 2] >>> repl = -1 >>> # execute function >>> repl_list = replace_nones(list_, repl) >>> # verify results >>> result = str(repl_list) >>> print(result) [-1, 0, 1, 2]
def executable(self):
    """Connection against which statements will be executed.

    Opened lazily on first access and stored on ``self.local``.
    """
    try:
        return self.local.conn
    except AttributeError:
        self.local.conn = self.engine.connect()
        return self.local.conn
Connection against which statements will be executed.
def commit(using=None):
    """Commit the named connection, or every known connection when
    ``using`` is None, resetting the dirty flag."""
    if using is None:
        targets = list(tldap.backend.connections)
    else:
        targets = [using]
    for name in targets:
        tldap.backend.connections[name].commit()
Does the commit itself and resets the dirty flag.
def access_view(name, **kwargs):
    """Show the ACL for the specified service."""
    ctx = Context(**kwargs)
    action_args = {
        'unicorn': ctx.repo.create_secure_service('unicorn'),
        'service': name,
    }
    ctx.execute_action('access:view', **action_args)
Shows ACL for the specified service.
def cudnnSetStream(handle, id):
    """Set the current cuDNN library stream.

    Parameters
    ----------
    handle : cudnnHandle
        cuDNN context.
    id : cudaStream
        Stream id.
    """
    cudnnCheckStatus(_libcudnn.cudnnSetStream(handle, id))
Set current cuDNN library stream. Parameters ---------- handle : cudnnHandle cuDNN context. id : cudaStream Stream Id.
def truncate(text, length=50, ellipsis='...'):
    """Return ``text`` cut to ``length`` characters, appending ``ellipsis``
    only when something was actually trimmed.

    NOTE(review): the result may exceed ``length`` by len(ellipsis);
    confirm this is the intended contract before tightening it.
    """
    text = nativestring(text)
    head, tail = text[:length], text[length:]
    return head + ellipsis if tail else head
Returns a truncated version of the inputted text. :param text | <str> length | <int> ellipsis | <str> :return <str>
def _prepare_record(record, index, doc_type):
    """Prepare record data for indexing.

    :param record: The record to prepare.
    :param index: The Elasticsearch index.
    :param doc_type: The Elasticsearch document type.
    :returns: The record metadata dict.
    """
    if current_app.config['INDEXER_REPLACE_REFS']:
        data = copy.deepcopy(record.replace_refs())
    else:
        data = record.dumps()

    def _iso_utc(dt):
        # Localize naive datetimes to UTC before serializing; None stays None.
        return pytz.utc.localize(dt).isoformat() if dt else None

    data['_created'] = _iso_utc(record.created)
    data['_updated'] = _iso_utc(record.updated)
    # Give subscribers a chance to modify the payload before indexing.
    before_record_index.send(
        current_app._get_current_object(),
        json=data,
        record=record,
        index=index,
        doc_type=doc_type,
    )
    return data
Prepare record data for indexing. :param record: The record to prepare. :param index: The Elasticsearch index. :param doc_type: The Elasticsearch document type. :returns: The record metadata.
def catch_error(func):
    """Decorator: on a RabbitMQ connection error, log, reconnect once, and
    retry the wrapped call.

    Connection errors covered: pika's (when installed), select/socket
    errors, and amqp.ConnectionError.
    """
    import functools
    import amqp
    try:
        import pika.exceptions
        connect_exceptions = (
            pika.exceptions.ConnectionClosed,
            pika.exceptions.AMQPConnectionError,
        )
    except ImportError:
        # pika is optional; fall back to the always-available exceptions.
        connect_exceptions = ()
    connect_exceptions += (select.error, socket.error, amqp.ConnectionError)

    # Improvement: preserve the wrapped function's name/docstring.
    @functools.wraps(func)
    def wrap(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except connect_exceptions as e:
            logging.error('RabbitMQ error: %r, reconnect.', e)
            self.reconnect()
            return func(self, *args, **kwargs)
    return wrap
Catch errors of rabbitmq then reconnect
def _check_segment_cohesion(self): if self.n_seg != len(self.segments): raise ValueError("Length of segments must match the 'n_seg' field") for i in range(n_seg): s = self.segments[i] if i == 0 and self.seg_len[0] == 0: for file_name in s.file_name: if file_name != '~': raise ValueError("Layout specification records must have all file_names named '~'") if s.fs != self.fs: raise ValueError("The 'fs' in each segment must match the overall record's 'fs'") if s.sig_len != self.seg_len[i]: raise ValueError('The signal length of segment '+str(i)+' does not match the corresponding segment length') totalsig_len = totalsig_len + getattr(s, 'sig_len')
Check the cohesion of the segments field with other fields used to write the record
def is_type(obj, type_, **kwargs):
    """Indicate whether ``obj`` is (or inherits from) any type in ``type_``.

    Entries may be actual types or type *names* as strings; string names
    are matched against the class name and, failing that, its bases.
    Useful to avoid circular imports when the type cannot be imported.
    """
    candidates = type_ if is_iterable(type_) else [type_]
    result = False
    for candidate in candidates:
        if isinstance(candidate, type):
            result = isinstance(obj, candidate)
        elif obj.__class__.__name__ == candidate:
            result = True
        else:
            result = _check_base_classes(obj.__class__.__bases__, candidate)
        if result is True:
            break
    return result
Indicate if ``obj`` is a type in ``type_``. .. hint:: This checker is particularly useful when you want to evaluate whether ``obj`` is of a particular type, but importing that type directly to use in :func:`isinstance() <python:isinstance>` would cause a circular import error. To use this checker in that kind of situation, you can instead pass the *name* of the type you want to check as a string in ``type_``. The checker will evaluate it and see whether ``obj`` is of a type or inherits from a type whose name matches the string you passed. :param obj: The object whose type should be checked. :type obj: :class:`object <python:object>` :param type_: The type(s) to check against. :type type_: :class:`type <python:type>` / iterable of :class:`type <python:type>` / :class:`str <python:str>` with type name / iterable of :class:`str <python:str>` with type name :returns: ``True`` if ``obj`` is a type in ``type_``. Otherwise, ``False``. :rtype: :class:`bool <python:bool>` :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates keyword parameters passed to the underlying validator
def configure_app(**kwargs):
    """Build up the settings using the same method as logan."""
    cli_args = sys.argv[1:]
    args, command, command_args = parse_args(cli_args)
    parser = OptionParser()
    parser.add_option('--config', metavar='CONFIG')
    options, _logan_args = parser.parse_args(args)
    logan_configure(config_path=options.config, **kwargs)
Builds up the settings using the same method as logan
def skip(self):
    """Eat through the input iterator without recording content, stopping
    after the closing </div> of the thread."""
    for position, element in self.element_iter:
        tag, class_attr = _tag_and_class_attr(element)
        if position == "end" and tag == "div" and "thread" in class_attr:
            break
Eats through the input iterator without recording the content.
def set_scenario_hosts_file(self, network_name='user-net', domain_name=None):
    """Add hosts-file entries for each scenario host on the given network.

    :param network_name: (str) network whose internal IPs are written
    :param domain_name: (str) optional domain appended to each entry
    :return: None
    """
    log = logging.getLogger(self.cls_logger + '.set_scenario_hosts_file')
    log.info('Scanning scenario hosts to make entries in the hosts file for network: {n}'.format(n=network_name))
    for host in self.scenario_network_info:
        role = host['scenario_role_name']
        if domain_name:
            entry = '{r}.{d} {r}'.format(r=role, d=domain_name)
        else:
            entry = role
        for net in host['network_info']:
            if net['network_name'] == network_name:
                self.update_hosts_file(ip=net['internal_ip'], entry=entry)
Adds hosts file entries for each system in the scenario for the specified network_name provided :param network_name: (str) Name of the network to add to the hosts file :param domain_name: (str) Domain name to include in the hosts file entries if provided :return: None
def mark_flags_as_mutual_exclusive(flag_names, required=False, flag_values=_flagvalues.FLAGS):
    """Ensure that at most one flag among ``flag_names`` is not None.

    When ``required`` is True, exactly one must be set.  Warns for flags
    with non-None defaults, since the check cannot distinguish defaults
    from explicitly supplied values.
    """
    for name in flag_names:
        if flag_values[name].default is not None:
            warnings.warn(
                'Flag --{} has a non-None default value. That does not make sense '
                'with mark_flags_as_mutual_exclusive, which checks whether the '
                'listed flags have a value other than None.'.format(name))

    def validate_mutual_exclusion(flags_dict):
        set_count = sum(1 for value in flags_dict.values() if value is not None)
        if set_count == 1 or (not required and set_count == 0):
            return True
        raise _exceptions.ValidationError(
            '{} one of ({}) must have a value other than None.'.format(
                'Exactly' if required else 'At most', ', '.join(flag_names)))

    register_multi_flags_validator(
        flag_names, validate_mutual_exclusion, flag_values=flag_values)
Ensures that only one flag among flag_names is not None. Important note: This validator checks if flag values are None, and it does not distinguish between default and explicit values. Therefore, this validator does not make sense when applied to flags with default values other than None, including other false values (e.g. False, 0, '', []). That includes multi flags with a default value of [] instead of None. Args: flag_names: [str], names of the flags. required: bool. If true, exactly one of the flags must have a value other than None. Otherwise, at most one of the flags can have a value other than None, and it is valid for all of the flags to be None. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined.
def _eval_target_brutal(state, ip, limit): addrs = state.solver.eval_upto(ip, limit) return [ (ip == addr, addr) for addr in addrs ]
The traditional way of evaluating symbolic jump targets. :param state: A SimState instance. :param ip: The AST of the instruction pointer to evaluate. :param limit: The maximum number of concrete IPs. :return: A list of conditions and the corresponding concrete IPs. :rtype: list
def get_extended(self, config):
    """Generate a configuration that includes all inherited values.

    :param config: Container configuration.
    :type config: ContainerConfiguration
    :return: A merged (shallow) copy of all inherited configurations,
      merged with the container configuration itself (which wins).
    :rtype: ContainerConfiguration
    """
    if not config.extends or self._extended:
        return config
    merged = ContainerConfiguration()
    for parent_name in config.extends:
        base = self._containers.get(parent_name)
        if not base:
            raise KeyError(parent_name)
        # Parents may themselves extend others; resolve recursively.
        merged.merge_from_obj(self.get_extended(base))
    merged.merge_from_obj(config)
    return merged
Generates a configuration that includes all inherited values. :param config: Container configuration. :type config: ContainerConfiguration :return: A merged (shallow) copy of all inherited configurations merged with the container configuration. :rtype: ContainerConfiguration
def set_title(self, title, subtitle=''):
    """Set the title and the subtitle of the suite."""
    self.title, self.subtitle = title, subtitle
Set the title and the subtitle of the suite.
def _mudraw(buffer, fmt):
    """Rasterize the in-memory PDF ``buffer`` with mupdf's mudraw, returning
    the rendered output bytes in format ``fmt``.

    Raises RuntimeError with mudraw's stderr output on failure.
    """
    with NamedTemporaryFile(suffix='.pdf') as pdf_file:
        pdf_file.write(buffer)
        pdf_file.seek(0)
        pdf_file.flush()
        # '-o -' streams the rendered output to stdout.
        result = run(
            ['mudraw', '-F', fmt, '-o', '-', pdf_file.name],
            stdout=PIPE, stderr=PIPE
        )
        if result.stderr:
            raise RuntimeError(result.stderr.decode())
        return result.stdout
Use mupdf draw to rasterize the PDF in the memory buffer
def find_next_character(code, position, char):
    """Find the next occurrence of ``char`` after ``position``, skipping
    whitespace, and return its (first, last) positions — or (None, None)
    when a different character is found first."""
    cursor = LineCol(code, *position)
    while not cursor.eof and cursor.char() in WHITESPACE:
        cursor.inc()
    if not cursor.eof and cursor.char() == char:
        start = cursor.tuple()
        return start, inc_tuple(start)
    return None, None
Find next char and return its first and last positions
def list_exchanges_for_vhost(self, vhost):
    """A list of all exchanges in a given virtual host.

    :param vhost: The vhost name
    :type vhost: str
    """
    path = '/api/exchanges/{0}'.format(urllib.parse.quote_plus(vhost))
    return self._api_get(path)
A list of all exchanges in a given virtual host. :param vhost: The vhost name :type vhost: str
def get_project(self, name):
    """Return a dict mapping available versions to Distribution instances
    for ``name``.

    Thin caching layer over ``_get_project``; with no cache configured
    every call goes straight through.
    """
    if self._cache is None:
        return self._get_project(name)
    if name in self._cache:
        return self._cache[name]
    self.clear_errors()
    result = self._get_project(name)
    self._cache[name] = result
    return result
For a given project, get a dictionary mapping available versions to Distribution instances. This calls _get_project to do all the work, and just implements a caching layer on top.
def open_fileswitcher(self, symbol=False):
    """Open the file list management dialog box, or hide it when it is
    already visible.  With ``symbol=True``, open in symbol-search mode."""
    switcher = self.fileswitcher
    if switcher is not None and switcher.is_visible:
        switcher.hide()
        switcher.is_visible = False
        return
    if symbol:
        switcher.plugin = self.editor
        switcher.set_search_text('@')
    else:
        switcher.set_search_text('')
    switcher.show()
    switcher.is_visible = True
Open file list management dialog box.
def _make_ssh_forward_handler_class(self, remote_address_):
    """Build a _ForwardHandler subclass bound to this tunnel's transport,
    logger, and the given remote address."""
    class Handler(_ForwardHandler):
        remote_address = remote_address_
        ssh_transport = self._transport
        logger = self.logger
    return Handler
Make SSH Handler class
def author_id_normalize_and_schema(uid, schema=None):
    """Detect and normalize an author UID schema.

    Args:
        uid (string): a UID string.
        schema (string): optional schema to resolve against.

    Returns:
        Tuple[string, string]: (normalized uid, schema).

    Raises:
        UnknownUIDSchema: when the schema cannot be determined or is
            ambiguous (more than one candidate matches).
        SchemaUIDConflict: when the specified schema does not match the UID.
    """
    def _get_uid_normalized_in_schema(_uid, _schema):
        # Returns the normalized uid when it matches _schema's regex,
        # otherwise None (implicit).
        regex, template = _RE_AUTHORS_UID[_schema]
        match = regex.match(_uid)
        if match:
            return template.format(match.group('uid'))

    # ORCID gets dedicated handling via idutils.
    if idutils.is_orcid(uid) and schema in (None, 'ORCID'):
        return idutils.normalize_orcid(uid), 'ORCID'

    if schema and schema not in _RE_AUTHORS_UID:
        # Explicit but unrecognized schema.
        raise UnknownUIDSchema(uid)

    if schema:
        # Explicit schema: either it matches, or it's a conflict.
        normalized_uid = _get_uid_normalized_in_schema(uid, schema)
        if normalized_uid:
            return normalized_uid, schema
        else:
            raise SchemaUIDConflict(schema, uid)

    # No schema given: try every known schema; exactly one must match.
    match_schema, normalized_uid = None, None
    for candidate_schema in _RE_AUTHORS_UID:
        candidate_uid = _get_uid_normalized_in_schema(uid, candidate_schema)
        if candidate_uid:
            if match_schema:
                # Ambiguous: more than one schema matched.
                raise UnknownUIDSchema(uid)
            match_schema = candidate_schema
            normalized_uid = candidate_uid

    if match_schema:
        return normalized_uid, match_schema

    raise UnknownUIDSchema(uid)
Detect and normalize an author UID schema. Args: uid (string): a UID string schema (string): try to resolve to schema Returns: Tuple[string, string]: a tuple (uid, schema) where: - uid: the UID normalized to comply with the id.json schema - schema: a schema of the UID or *None* if not recognised Raise: UnknownUIDSchema: if UID is too little to definitively guess the schema SchemaUIDConflict: if specified schema is not matching the given UID
def edit_rrset_rdata(self, zone_name, rtype, owner_name, rdata, profile=None):
    """Update an existing RRSet's rdata in the specified zone.

    Arguments:
        zone_name -- zone containing the RRSet (trailing dot optional).
        rtype -- RRSet type, numeric or well-known name.
        owner_name -- owner name, relative or absolute.
        rdata -- BIND data as a string, or a list of strings.
        profile -- resource-pool profile info; when given, the request is
            sent as PUT instead of PATCH.
    """
    if type(rdata) is not list:
        rdata = [rdata]
    payload = {"rdata": rdata}
    if profile:
        payload["profile"] = profile
        method = "put"
    else:
        method = "patch"
    uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name
    return getattr(self.rest_api_connection, method)(uri, json.dumps(payload))
Updates an existing RRSet's Rdata in the specified zone. Arguments: zone_name -- The zone that contains the RRSet. The trailing dot is optional. rtype -- The type of the RRSet. This can be numeric (1) or if a well-known name is defined for the type (A), you can use it instead. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) rdata -- The updated BIND data for the RRSet as a string. If there is a single resource record in the RRSet, you can pass in the single string. If there are multiple resource records in this RRSet, pass in a list of strings. profile -- The profile info if this is updating a resource pool
def normal_distribution(mean, variance, minimum=None, maximum=None, weight_count=23):
    """Return a list of (outcome, strength) weight tuples approximating a
    normal distribution with the given mean and variance.

    Samples the density over mean +/- 5 standard deviations.  When
    ``minimum`` and/or ``maximum`` are given, the weights are bounded via
    ``bound_weights`` (which raises ValueError if maximum < minimum).
    """
    std_dev = math.sqrt(variance)
    min_x = (std_dev * -5) + mean
    max_x = (std_dev * 5) + mean
    step = (max_x - min_x) / weight_count
    weights = []
    x = min_x
    while x < max_x:
        weights.append((x, _normal_function(x, mean, variance)))
        x += step
    if minimum is None and maximum is None:
        return weights
    return bound_weights(weights, minimum, maximum)
Return a list of weights approximating a normal distribution. Args: mean (float): The mean of the distribution variance (float): The variance of the distribution minimum (float): The minimum outcome possible to bound the output distribution to maximum (float): The maximum outcome possible to bound the output distribution to weight_count (int): The number of weights that will be used to approximate the distribution Returns: list: a list of ``(float, float)`` weight tuples approximating a normal distribution. Raises: ValueError: ``if maximum < minimum`` TypeError: if both ``minimum`` and ``maximum`` are ``None`` Example: >>> weights = normal_distribution(10, 3, ... minimum=0, maximum=20, ... weight_count=5) >>> rounded_weights = [(round(value, 2), round(strength, 2)) ... for value, strength in weights] >>> rounded_weights [(1.34, 0.0), (4.8, 0.0), (8.27, 0.14), (11.73, 0.14), (15.2, 0.0)]
def resource_name(cls):
    """Represents the resource name declared on the class.

    The abstract REST base classes report "Not Implemented"; any other
    class without ``__resource_name__`` raises NotImplementedError.
    """
    if cls.__name__ in ("NURESTRootObject", "NURESTObject"):
        return "Not Implemented"
    if cls.__resource_name__ is None:
        raise NotImplementedError('%s has no defined resource name. Implement resource_name property first.' % cls)
    return cls.__resource_name__
Represents the resource name
def subnet_distance(self):
    """Specific subnet administrative distances.

    :return: list of (subnet Element, distance) tuples
    """
    entries = self.data.get('distance_entry')
    return [
        (Element.from_href(entry.get('subnet')), entry.get('distance'))
        for entry in entries
    ]
Specific subnet administrative distances :return: list of tuple (subnet, distance)
def _move_mount(robot, mount, point):
    """Move ``mount`` ('left'/'right') to ``point`` (x, y, z).

    The carriage moves the mount in the Z axis; the gantry moves in X and
    Y.  Mount movements lack the collision protections of Pipette's
    ``move``, so the safe sequence is: home both Z carriages, move X/Y,
    then descend to the target Z.
    """
    carriage = robot._actuators[mount]['carriage']
    # Home this mount's Z carriage, then the opposite one, before any XY motion.
    robot.poses = carriage.home(robot.poses)
    other_mount = 'left' if mount == 'right' else 'right'
    robot.poses = robot._actuators[other_mount]['carriage'].home(robot.poses)
    robot.gantry.move(
        robot.poses, x=point[0], y=point[1])
    robot.poses = carriage.move(
        robot.poses, z=point[2])
    # Read back achieved position: X/Y from the carriage pose, Z from the gantry.
    x, y, _ = tuple(
        pose_tracker.absolute(
            robot.poses, robot._actuators[mount]['carriage']))
    _, _, z = tuple(
        pose_tracker.absolute(
            robot.poses, robot.gantry))
    new_position = (x, y, z)
    return "Move complete. New position: {}".format(new_position)
The carriage moves the mount in the Z axis, and the gantry moves in X and Y Mount movements do not have the same protections calculated in to an existing `move` command like Pipette does, so the safest thing is to home the Z axis, then move in X and Y, then move down to the specified Z height
def set_network_settings(self, mtu, sock_snd, sock_rcv, tcp_wnd_snd, tcp_wnd_rcv):
    """Set the network configuration of the NAT engine.

    :param mtu: MTU (maximum transmission unit) in bytes.
    :param sock_snd: socket send buffer capacity in bytes.
    :param sock_rcv: socket receive buffer capacity in bytes.
    :param tcp_wnd_snd: initial sending TCP window size in bytes.
    :param tcp_wnd_rcv: initial receiving TCP window size in bytes.
    :raises TypeError: when any argument is not a baseinteger.
    """
    # Validate all arguments uniformly instead of five copy-pasted checks;
    # the error messages are byte-identical to the originals.
    params = [
        ('mtu', mtu),
        ('sock_snd', sock_snd),
        ('sock_rcv', sock_rcv),
        ('tcp_wnd_snd', tcp_wnd_snd),
        ('tcp_wnd_rcv', tcp_wnd_rcv),
    ]
    for param_name, value in params:
        if not isinstance(value, baseinteger):
            raise TypeError("%s can only be an instance of type baseinteger" % param_name)
    self._call("setNetworkSettings",
               in_p=[mtu, sock_snd, sock_rcv, tcp_wnd_snd, tcp_wnd_rcv])
Sets network configuration of the NAT engine. in mtu of type int MTU (maximum transmission unit) of the NAT engine in bytes. in sock_snd of type int Capacity of the socket send buffer in bytes when creating a new socket. in sock_rcv of type int Capacity of the socket receive buffer in bytes when creating a new socket. in tcp_wnd_snd of type int Initial size of the NAT engine's sending TCP window in bytes when establishing a new TCP connection. in tcp_wnd_rcv of type int Initial size of the NAT engine's receiving TCP window in bytes when establishing a new TCP connection.
def operator_driven(drain_timeout=_DEFAULT_DRAIN, reset_timeout=_DEFAULT_RESET, max_consecutive_attempts=_DEFAULT_ATTEMPTS):
    """Define an operator-driven consistent region configuration, in which
    the source operator triggers drain and checkpoint cycles.

    Args:
        drain_timeout: drain timeout as a timedelta or seconds (float).
        reset_timeout: reset timeout as a timedelta or seconds (float).
        max_consecutive_attempts(int): maximum consecutive reset attempts
            (1..2147483647).

    Returns:
        ConsistentRegionConfig: the configuration.
    """
    return ConsistentRegionConfig(
        trigger=ConsistentRegionConfig.Trigger.OPERATOR_DRIVEN,
        drain_timeout=drain_timeout,
        reset_timeout=reset_timeout,
        max_consecutive_attempts=max_consecutive_attempts,
    )
Define an operator-driven consistent region configuration. The source operator triggers drain and checkpoint cycles for the region. Args: drain_timeout: The drain timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. reset_timeout: The reset timeout, as either a :py:class:`datetime.timedelta` value or the number of seconds as a `float`. If not specified, the default value is 180 seconds. max_consecutive_attempts(int): The maximum number of consecutive attempts to reset the region. This must be an integer value between 1 and 2147483647, inclusive. If not specified, the default value is 5. Returns: ConsistentRegionConfig: the configuration.
def get_field_label(self, trans, field):
    """Get the display label for ``field`` from the model's _meta API.

    Tries ``trans`` first, then ``self.sender``; falls back to the raw
    field name when neither model resolves it.
    """
    try:
        return trans._meta.get_field_by_name(field)[0].verbose_name
    except Exception:
        pass
    try:
        return self.sender._meta.get_field_by_name(field)[0].verbose_name
    except Exception:
        return field
Get the field label from the _meta api of the model :param trans: :param field: :return:
def availableRoles(self):
    """Return [id, name] pairs for the roles this instructor offers
    private lessons for; empty when the instructor has no private lesson
    details configured."""
    if not hasattr(self.instructor, 'instructorprivatelessondetails'):
        return []
    details = self.instructor.instructorprivatelessondetails
    return [[role.id, role.name] for role in details.roles.all()]
Some instructors only offer private lessons for certain roles, so we should only allow booking for the roles that have been selected for the instructor.
def addPhenotypeAssociationSet(self, phenotypeAssociationSet):
    """Register the specified g2p association set in this backend's
    id map, local-name map, and id list."""
    set_id = phenotypeAssociationSet.getId()
    local_id = phenotypeAssociationSet.getLocalId()
    self._phenotypeAssociationSetIdMap[set_id] = phenotypeAssociationSet
    self._phenotypeAssociationSetNameMap[local_id] = phenotypeAssociationSet
    self._phenotypeAssociationSetIds.append(set_id)
Adds the specified g2p association set to this backend.
def sar(self, count):
    """Arithmetic right-shift by ``count`` bits, filling on the left with
    copies of the top (sign) bit.

    ``count`` may be any non-negative integer; counts beyond the word
    width yield a word of sign-bit copies.  Raises ValueError for
    negative counts.
    """
    shift = operator.index(count)
    if shift < 0:
        raise ValueError('negative shift')
    shift = min(shift, self._width)
    return BinWord(self._width, self.to_sint() >> shift, trunc=True)
Performs an arithmetic right-shift of a BinWord by the given number of bits. Bits shifted out of the word are lost. The word is filled on the left with copies of the top bit. The shift count can be an arbitrary non-negative number, including counts larger than the word (a word filled with copies of the sign bit is returned in this case).
def markov_blanket(self, beta, alpha):
    """Create the total Markov blanket for the states.

    Parameters
    ----------
    beta : np.array
        Untransformed starting values for latent variables.
    alpha : np.array
        A vector of states.

    Returns
    ----------
    The summed likelihood and state Markov blankets.
    """
    likelihood_blanket = self.likelihood_markov_blanket(beta)
    state_blanket = self.state_likelihood_markov_blanket(beta, alpha, 0)
    for idx in range(1, self.state_no):
        likelihood_blanket = np.append(likelihood_blanket, self.likelihood_markov_blanket(beta))
        state_blanket = np.append(state_blanket, self.state_likelihood_markov_blanket(beta, alpha, idx))
    return likelihood_blanket + state_blanket
Creates total Markov blanket for states Parameters ---------- beta : np.array Contains untransformed starting values for latent variables alpha : np.array A vector of states Returns ---------- Markov blanket for states
def add_watch_callback(self, *args, **kwargs):
    """Watch a key or range of keys and invoke a callback on every event.

    Raises WatchTimedOut when the watch cannot be created within the
    client's configured timeout.  Returns a watch_id usable to cancel
    the watch later.
    """
    try:
        return self.watcher.add_callback(*args, **kwargs)
    except queue.Empty:
        raise exceptions.WatchTimedOut()
Watch a key or range of keys and call a callback on every event. If timeout was declared during the client initialization and the watch cannot be created during that time the method raises a ``WatchTimedOut`` exception. :param key: key to watch :param callback: callback function :returns: watch_id. Later it could be used for cancelling watch.
def extract_scalar_reward(value, scalar_key='default'):
    """Extract a scalar reward from a trial result.

    Accepts a plain float/int, or a dict whose ``scalar_key`` entry is a
    float/int.

    Raises
    ------
    RuntimeError
        Incorrect final result: the final result should be float/int, or
        a dict with a "default" key whose value is float/int.
    """
    if isinstance(value, (float, int)):
        return value
    if isinstance(value, dict) and isinstance(value.get(scalar_key), (float, int)):
        return value[scalar_key]
    raise RuntimeError('Incorrect final result: the final result should be float/int, or a dict which has a key named "default" whose value is float/int.')
Extract scalar reward from trial result. Raises ------ RuntimeError Incorrect final result: the final result should be float/int, or a dict which has a key named "default" whose value is float/int.
def package_info(pkg_name):
    """Print the information (platform, version, path, worlds) of a package.

    Args:
        pkg_name (str): name of the package to describe.
    """
    indent = " "
    for config, _ in _iter_packages():
        if config["name"] != pkg_name:
            continue
        print("Package:", pkg_name)
        print(indent, "Platform:", config["platform"])
        print(indent, "Version:", config["version"])
        print(indent, "Path:", config["path"])
        print(indent, "Worlds:")
        for world in config["maps"]:
            world_info(world["name"], world_config=world, initial_indent=" ")
Prints the information of a package. Args: pkg_name (str): The name of the desired package to get information
def resolve_label_conflict(mapping, old_labels=None, new_labels=None):
    """Resolve a self-labeling conflict by creating an intermediate labeling.

    Args:
        mapping (dict): maps current variable labels to new ones.
        old_labels (set, optional): keys of mapping.  Passed in only for
            performance; not checked.
        new_labels (set, optional): values of mapping.  Passed in only for
            performance; not checked.

    Returns:
        tuple: (old_to_intermediate, intermediate_to_new) dicts whose
        composition reproduces ``mapping`` without label collisions.
    """
    if old_labels is None:
        old_labels = set(mapping)
    if new_labels is None:
        new_labels = set(itervalues(mapping))

    # Fresh labels start past any plausible existing integer label.
    counter = itertools.count(2 * len(mapping))

    old_to_intermediate = {}
    intermediate_to_new = {}

    for old, new in iteritems(mapping):
        if old == new:
            # Identity relabels need no intermediate step.
            continue
        if old in new_labels or new in old_labels:
            # Conflict: route through a brand-new intermediate label.
            lbl = next(counter)
            while lbl in new_labels or lbl in old_labels:
                lbl = next(counter)
            old_to_intermediate[old] = lbl
            intermediate_to_new[lbl] = new
        else:
            # No conflict: relabel directly in the first pass.
            old_to_intermediate[old] = new
    return old_to_intermediate, intermediate_to_new
Resolve a self-labeling conflict by creating an intermediate labeling. Args: mapping (dict): A dict mapping the current variable labels to new ones. old_labels (set, optional, default=None): The keys of mapping. Can be passed in for performance reasons. These are not checked. new_labels (set, optional, default=None): The values of mapping. Can be passed in for performance reasons. These are not checked. Returns: tuple: A 2-tuple containing: dict: A map from the keys of mapping to an intermediate labeling dict: A map from the intermediate labeling to the values of mapping.
def create_admin(app, appbuilder, username, firstname, lastname, email, password):
    """Creates an admin user in the given Flask-AppBuilder application."""
    auth_names = {
        c.AUTH_DB: "Database Authentications",
        c.AUTH_OID: "OpenID Authentication",
        c.AUTH_LDAP: "LDAP Authentication",
        c.AUTH_REMOTE_USER: "WebServer REMOTE_USER Authentication",
        c.AUTH_OAUTH: "OAuth Authentication",
    }
    _appbuilder = import_application(app, appbuilder)
    recognized = auth_names.get(_appbuilder.sm.auth_type, "No Auth method")
    click.echo(click.style("Recognized {0}.".format(recognized), fg="green"))
    role_admin = _appbuilder.sm.find_role(_appbuilder.sm.auth_role_admin)
    user = _appbuilder.sm.add_user(
        username, firstname, lastname, email, role_admin, password
    )
    if user:
        click.echo(click.style("Admin User {0} created.".format(username), fg="green"))
    else:
        click.echo(click.style("No user created an error occured", fg="red"))
Creates an admin user
def add(parent, idx, value):
    """Add ``value`` to ``parent`` at ``idx``.

    For dicts, sets a new key (an existing key is an error).  For lists,
    "" or "~" appends; otherwise inserts at the integer index.
    """
    if isinstance(parent, dict):
        if idx in parent:
            raise JSONPatchError("Item already exists")
        parent[idx] = value
    elif isinstance(parent, list):
        if idx in ("", "~"):
            parent.append(value)
        else:
            parent.insert(int(idx), value)
    else:
        # NOTE(review): 'JSONPathError' here vs 'JSONPatchError' above looks
        # like a typo — confirm which exception class is intended.
        raise JSONPathError("Invalid path for operation")
Add a value to a dict (as a new key) or to a list (append for "" or "~", otherwise insert at the integer index).
def get_resources_by_search(self, resource_query, resource_search):
    """Pass through to provider ResourceSearchSession.get_resources_by_search,
    enforcing the 'search' permission first."""
    if not self._can('search'):
        raise PermissionDenied()
    return self._provider_session.get_resources_by_search(resource_query, resource_search)
Pass through to provider ResourceSearchSession.get_resources_by_search
def copy_plan(modeladmin, request, queryset):
    """Admin action: duplicate each selected plan together with its
    pricings and quotas.  Copies start unavailable and non-default."""
    for plan in queryset:
        clone = deepcopy(plan)
        clone.id = None
        clone.available = False
        clone.default = False
        clone.created = None
        clone.save(force_insert=True)
        for pricing in plan.planpricing_set.all():
            pricing.id = None
            pricing.plan = clone
            pricing.save(force_insert=True)
        for quota in plan.planquota_set.all():
            quota.id = None
            quota.plan = clone
            quota.save(force_insert=True)
Admin command for duplicating plans preserving quotas and pricings.
def neg_int(i):
    """Simple negative integer validation.

    Accepts ints and int-like strings that are <= 0; returns the
    (converted) integer.  Raises ValueError otherwise.
    """
    try:
        if isinstance(i, string_types):
            i = int(i)
        if not isinstance(i, int) or i > 0:
            raise TypeError()
    except (TypeError, ValueError):
        # Bug fix: was a bare `except:` that also swallowed
        # KeyboardInterrupt/SystemExit; only conversion and validation
        # failures should map to the validation error.
        raise ValueError("Not a negative integer")
    return i
Simple negative integer validation.
def default(cls):
    """Make the current foreground color the default."""
    # Reset the foreground bits to plain grey and drop the intensity
    # flag, leaving the background bits untouched.
    attrs = cls._get_text_attributes()
    attrs &= ~win32.FOREGROUND_MASK
    attrs |= win32.FOREGROUND_GREY
    attrs &= ~win32.FOREGROUND_INTENSITY
    cls._set_text_attributes(attrs)
Make the current foreground color the default.
def _norm(self, x):
    """Compute the safe (epsilon-stabilized) L2 norm of ``x``.

    Sums squares over the last axis (keeping the axis so the result
    broadcasts against ``x``) and adds 1e-7 under the square root to
    avoid a NaN gradient at exactly-zero input.
    """
    return tf.sqrt(tf.reduce_sum(tf.square(x), keepdims=True, axis=-1) + 1e-7)
Compute the safe norm.
def copyHiddenToContext(self):
    """Uses key to identify the hidden layer associated with each layer
    in the self.contextLayers dictionary, and copies that hidden layer's
    activations into the context layer.
    """
    for item in list(self.contextLayers.items()):
        # item[0] is the hidden layer's name, item[1] the context layer.
        if self.verbosity > 2:
            print('Hidden layer: ', self.getLayer(item[0]).activation)
        if self.verbosity > 2:
            print('Context layer before copy: ', item[1].activation)
        item[1].copyActivations(self.getLayer(item[0]).activation)
        if self.verbosity > 2:
            print('Context layer after copy: ', item[1].activation)
Uses key to identify the hidden layer associated with each layer in the self.contextLayers dictionary.
async def on_raw_cap_ls(self, params):
    """Update capability mapping. Request capabilities.

    Handles an IRCv3 ``CAP LS`` reply: for every advertised capability
    not yet tracked, asks the matching
    ``on_capability_<name>_available`` hook (if defined) whether we want
    it.  Wanted capabilities are sent back in one ``CAP REQ``;
    otherwise negotiation ends with ``CAP END``.
    """
    to_request = set()
    for capab in params[0].split():
        capab, value = self._capability_normalize(capab)

        # Only consider capabilities we have not seen/decided on before.
        if capab in self._capabilities:
            continue

        # Dynamically look up the per-capability hook, e.g.
        # on_capability_<identifier>_available(value).
        attr = 'on_capability_' + pydle.protocol.identifierify(capab) + '_available'
        supported = (await getattr(self, attr)(value)) if hasattr(self, attr) else False

        if supported:
            # A string result carries a requested value for the capability.
            if isinstance(supported, str):
                to_request.add(capab + CAPABILITY_VALUE_DIVIDER + supported)
            else:
                to_request.add(capab)
        else:
            # Mark unsupported/unwanted capabilities as disabled.
            self._capabilities[capab] = False

    if to_request:
        # Record what we asked for (names only, values stripped).
        self._capabilities_requested.update(x.split(CAPABILITY_VALUE_DIVIDER, 1)[0] for x in to_request)
        await self.rawmsg('CAP', 'REQ', ' '.join(to_request))
    else:
        # Nothing to request: end capability negotiation.
        await self.rawmsg('CAP', 'END')
Update capability mapping. Request capabilities.
def app1(self):
    """First APP1 marker in image markers.

    :raises KeyError: if the image contains no APP1 marker.
    """
    marker = next(
        (m for m in self._markers if m.marker_code == JPEG_MARKER_CODE.APP1),
        None,
    )
    if marker is None:
        raise KeyError('no APP1 marker in image')
    return marker
First APP1 marker in image markers.
def bool(name, execute_bool=True, default=None):
    """Only execute the function if the boolean variable is set.

    Args:
        name: The name of the environment variable
        execute_bool: The boolean value to execute the function on
        default: The default value if the environment variable is not set
            (respects `execute_bool`)

    Returns:
        The function return value or `None` if the function was skipped.
    """
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            if core.isset(name):
                # An explicitly-set variable always wins; run only when
                # it matches.  (Previously a set-but-non-matching value
                # could still fall through to the `default` branch.)
                if core.bool(name) == execute_bool:
                    return func(*args, **kwargs)
            elif default is not None and default == execute_bool:
                return func(*args, **kwargs)
        return _decorator
    return wrapped
Only execute the function if the boolean variable is set. Args: name: The name of the environment variable execute_bool: The boolean value to execute the function on default: The default value if the environment variable is not set (respects `execute_bool`) Returns: The function return value or `None` if the function was skipped.
def remove_pointer(type_):
    """removes pointer from the type definition

    If type is not pointer type, it will be returned as is.  Aliases are
    resolved first; const/volatile qualifiers wrapping the pointer are
    preserved on the resulting base type.
    """
    nake_type = remove_alias(type_)
    if not is_pointer(nake_type):
        return type_
    elif isinstance(nake_type, cpptypes.volatile_t) and \
            isinstance(nake_type.base, cpptypes.pointer_t):
        # volatile T* -> volatile T
        return cpptypes.volatile_t(nake_type.base.base)
    elif isinstance(nake_type, cpptypes.const_t) and \
            isinstance(nake_type.base, cpptypes.pointer_t):
        # const T* -> const T
        return cpptypes.const_t(nake_type.base.base)
    elif isinstance(nake_type, cpptypes.volatile_t) \
            and isinstance(nake_type.base, cpptypes.const_t) \
            and isinstance(nake_type.base.base, cpptypes.pointer_t):
        # const volatile T* -> const volatile T
        return (
            cpptypes.volatile_t(cpptypes.const_t(nake_type.base.base.base))
        )
    # Plain pointer: strip one level of indirection.
    return nake_type.base
removes pointer from the type definition If type is not pointer type, it will be returned as is.
def get(self, run_id):
    """Return the run associated with a particular `run_id`.

    :param run_id: identifier of the stored run.
    :return: dict
    :raises IOError: if the run's config or run file cannot be read.
    """
    config = _read_json(_path_to_config(self.directory, run_id))
    run = _read_json(_path_to_run(self.directory, run_id))
    try:
        info = _read_json(_path_to_info(self.directory, run_id))
    except IOError:
        # The info file is optional; fall back to an empty mapping.
        info = {}
    return _create_run(run_id, run, config, info)
Return the run associated with a particular `run_id`. :param run_id: :return: dict :raises FileNotFoundError
def total_weight(self):
    """Read a weight from the sensor in grams.

    Returns
    -------
    weight : float
        The sensor weight in grams.
    """
    raw = self._raw_weights()
    n_samples = raw.shape[1]
    if n_samples == 0:
        return 0.0
    if n_samples < self._ntaps:
        # Not enough history for the filter: use a plain per-channel
        # mean, summed over channels.
        return np.sum(np.mean(raw, axis=1))
    # Enough history: apply the filter coefficients across samples.
    return self._filter_coeffs.dot(np.sum(raw, axis=0))
Read a weight from the sensor in grams. Returns ------- weight : float The sensor weight in grams.
def derivative(self, point):
    r"""The derivative operator.

    ``DistOperator(y).derivative(z)(x) == ((y - z) / y.dist(z)).inner(x)``

    This is only applicable in inner product spaces.

    Parameters
    ----------
    point : `domain` `element-like`
        Point in which to take the derivative.

    Returns
    -------
    derivative : `InnerProductOperator`

    Raises
    ------
    ValueError
        If ``point == self.vector``, in which case the derivative is not
        well defined in the Frechet sense.
    """
    point = self.domain.element(point)
    diff = point - self.vector
    dist = self.vector.dist(point)
    if dist == 0:
        # The Frechet derivative does not exist at the reference vector.
        raise ValueError('not differentiable at the reference vector {!r}'
                         ''.format(self.vector))
    # Gradient direction: (point - vector) normalized by the distance.
    return InnerProductOperator(diff / dist)
r"""The derivative operator. ``DistOperator(y).derivative(z)(x) == ((y - z) / y.dist(z)).inner(x)`` This is only applicable in inner product spaces. Parameters ---------- x : `domain` `element-like` Point in which to take the derivative. Returns ------- derivative : `InnerProductOperator` Raises ------ ValueError If ``point == self.vector``, in which case the derivative is not well defined in the Frechet sense. Notes ----- The derivative cannot be written in a general sense except in Hilbert spaces, in which case it is given by .. math:: (D d(\cdot, y))(z)(x) = \langle (y-z) / d(y, z), x \rangle Examples -------- >>> r2 = odl.rn(2) >>> x = r2.element([1, 1]) >>> op = DistOperator(x) >>> derivative = op.derivative([2, 1]) >>> derivative([1, 0]) 1.0
def insert_fft_option_group(parser):
    """Adds the options used to choose an FFT backend.

    Adds an argument group with a ``--fft-backends`` preference list and
    then gives every available backend module a chance to add its own
    options via an optional ``insert_fft_options(fft_group)`` hook.

    Parameters
    ----------
    parser : object
        OptionParser instance
    """
    fft_group = parser.add_argument_group("Options for selecting the"
                                          " FFT backend and controlling its performance"
                                          " in this program.")
    fft_group.add_argument("--fft-backends",
                           help="Preference list of the FFT backends. "
                                "Choices are: \n" + str(get_backend_names()),
                           nargs='*', default=[])
    for backend in get_backend_modules():
        # Backends without the hook simply contribute no extra options.
        try:
            backend.insert_fft_options(fft_group)
        except AttributeError:
            pass
Adds the options used to choose an FFT backend. This should be used if your program supports the ability to select the FFT backend; otherwise you may simply call the fft and ifft functions and rely on default choices. This function will also attempt to add any options exported by available backends through a function called insert_fft_options. These submodule functions should take the fft_group object as argument. Parameters ---------- parser : object OptionParser instance
def get_id(self, details=False):
    """Get daemon identification information.

    :param details: accepted for API compatibility but not used in this
        body -- presumably subclasses/overrides add extra fields when it
        is True; TODO confirm against callers.
    :return: dict with ``alignak`` (instance name), ``type`` (daemon
        type), ``name`` (daemon name) and ``version`` (Alignak version).
    :rtype: dict
    """
    res = {
        # getattr defaults guard against partially-initialised daemons.
        "alignak": getattr(self, 'alignak_name', 'unknown'),
        "type": getattr(self, 'type', 'unknown'),
        "name": getattr(self, 'name', 'unknown'),
        "version": VERSION
    }
    return res
Get daemon identification information :return: A dict with the following structure :: { "alignak": the Alignak instance name "type": daemon type "name": daemon name "version": Alignak version } :rtype: dict
def parse_s2bs(s2bs):
    """Convert s2b files to a dictionary.

    Each file is tab-separated with the key in column 0 and the value in
    column 1; later files/lines overwrite earlier keys.

    :param s2bs: iterable of file paths.
    :return: dict mapping column-0 values to column-1 values.
    """
    s2b = {}
    for path in s2bs:
        # `with` fixes the previous file-handle leak (`open` was never
        # closed); also stops shadowing the loop variable with the
        # parsed key.
        with open(path) as fh:
            for line in fh:
                fields = line.strip().split('\t')
                s2b[fields[0]] = fields[1]
    return s2b
convert s2b files to dictionary
def options(self, group, target=None, defaults=True):
    """Using inheritance up to the root, get the complete Options object
    for the given node and the specified group.

    :param group: name of the option group to resolve.
    :param target: dotted path of the node to resolve for; defaults to
        this node's own path.
    :param defaults: when True and this is a detached root, fall back to
        the matching entry in the global ``Store.options()`` tree.
    """
    if target is None:
        target = self.path
    if self.groups.get(group, None) is None:
        return None
    if self.parent is None and target and (self is not Store.options()) and defaults:
        # Detached root node: look the target up in the global Store
        # tree instead, after stripping this root's class name from the
        # dotted path.
        root_name = self.__class__.__name__
        replacement = root_name + ('' if len(target) == len(root_name) else '.')
        option_key = target.replace(replacement,'')
        match = Store.options().find(option_key)
        if match is not Store.options():
            return match.options(group)
        else:
            return Options()
    elif self.parent is None:
        # True root: its own group options are already complete.
        return self.groups[group]

    # Merge parent options with this node's overrides (child wins).
    parent_opts = self.parent.options(group,target, defaults)
    return Options(**dict(parent_opts.kwargs, **self.groups[group].kwargs))
Using inheritance up to the root, get the complete Options object for the given node and the specified group.
def set_add(self, key, value, create=False, **kwargs):
    """Add an item to a set if the item does not yet exist.

    :param key: The document ID
    :param value: Value to add
    :param create: Create the set if it does not exist.
        NOTE(review): this flag is accepted but never forwarded to the
        sub-document operation in this body -- confirm whether document
        creation actually works as documented.
    :param kwargs: Arguments to :meth:`mutate_in`
    :return: An operation result if the item was added, or ``None`` if
        the value was already present.
    """
    # 'addunique' fails with SubdocPathExistsError when the value is
    # already in the set; treat that as a successful no-op.
    op = SD.array_addunique('', value)
    try:
        sdres = self.mutate_in(key, op, **kwargs)
        return self._wrap_dsop(sdres)
    except E.SubdocPathExistsError:
        pass
Add an item to a set if the item does not yet exist. :param key: The document ID :param value: Value to add :param create: Create the set if it does not exist :param kwargs: Arguments to :meth:`mutate_in` :return: A :class:`~.OperationResult` if the item was added, :raise: :cb_exc:`NotFoundError` if the document does not exist and `create` was not specified. .. seealso:: :meth:`map_add`
def _format_ret(self, full_ret): ret = {} out = '' retcode = 0 for key, data in six.iteritems(full_ret): ret[key] = data['ret'] if 'out' in data: out = data['out'] ret_retcode = self._get_retcode(data) if ret_retcode > retcode: retcode = ret_retcode return ret, out, retcode
Take the full return data and format it to simple output
def _add_column(self, label, field):
    """Add a new column to the table.

    It will have the header text ``label``, but for data inserts and
    queries, the ``field`` name must be used.  If necessary, this will
    expand the size of the sheet itself to allow for the new column.

    NOTE(review): ``field`` is not referenced in this body -- presumably
    the header/field mapping happens elsewhere; confirm.
    """
    assert self.headers is not None
    # Find the right-most occupied header column (0 when empty).
    cols = 0
    if len(self._headers) > 0:
        cols = max([int(c.cell.col) for c in self._headers])
    new_col = cols + 1
    # Grow the worksheet if the new column does not fit yet.
    if int(self._ws.col_count.text) < new_col:
        self._ws.col_count.text = str(new_col)
        self._update_metadata()
    # Write the header cell (row 1) and track it locally.
    cell = self._service.UpdateCell(1, new_col, label, self._ss.id, self.id)
    self._headers.append(cell)
Add a new column to the table. It will have the header text ``label``, but for data inserts and queries, the ``field`` name must be used. If necessary, this will expand the size of the sheet itself to allow for the new column.
def set_debug(self, debug=1):
    """Set the debug level.

    :type debug: int
    :param debug: The debug level.
    """
    self._check_if_ready()
    # Keep our own level and the main loop's level in sync.
    self.debug = self.main_loop.debug = debug
Set the debug level. :type debug: int :param debug: The debug level.
def add_codedValue(self, name, code):
    """Adds a value to the coded value list, creating the list lazily."""
    if self._codedValues is None:
        self._codedValues = []
    entry = {"name": name, "code": code}
    self._codedValues.append(entry)
adds a value to the coded value list
def parse_strike_dip(strike, dip):
    """Parses strings of strike and dip and returns strike and dip
    measurements following the right-hand-rule.

    Parameters
    ----------
    strike : string
        A strike measurement. May be in azimuth or quadrant format.
    dip : string
        The dip angle and direction of a plane.

    Returns
    -------
    azi : float
        Right-hand-rule strike azimuth in degrees.
    dip : float
        Dip of the plane in degrees.
    """
    strike = parse_azimuth(strike)
    # Split e.g. "45NW" into the angle 45 and the direction "NW".
    dip, direction = split_trailing_letters(dip)
    if direction is not None:
        # Under the right-hand rule the dip direction lies 90 degrees
        # clockwise of strike; if the stated direction is on the other
        # side, flip the strike by 180.
        expected_direc = strike + 90
        if opposite_end(expected_direc, direction):
            strike += 180
    # Normalize back into [0, 360] after a possible flip.
    if strike > 360:
        strike -= 360
    return strike, dip
Parses strings of strike and dip and returns strike and dip measurements following the right-hand-rule. Dip directions are parsed, and if the measurement does not follow the right-hand-rule, the opposite end of the strike measurement is returned. Accepts either quadrant-formatted or azimuth-formatted strikes. For example, this would convert a strike of "N30E" and a dip of "45NW" to a strike of 210 and a dip of 45. Parameters ---------- strike : string A strike measurement. May be in azimuth or quadrant format. dip : string The dip angle and direction of a plane. Returns ------- azi : float Azimuth in degrees of the strike of the plane with dip direction indicated following the right-hand-rule. dip : float Dip of the plane in degrees.
def get_data_by_slug(model, slug, kind='', **kwargs):
    """Get instance data by slug and kind.

    Looks the instance up via its ``slug`` field and serializes it with
    the ``kind`` transformer.

    :return: a dict, or None when no instance matches.
    """
    instance = get_instance_by_slug(model, slug, **kwargs)
    return ins2dict(instance, kind) if instance else None
Get instance data by slug and kind. Raise 404 Not Found if there is no data. This function requires model has a `slug` column. :param model: a string, model name in rio.models :param slug: a string used to query by `slug`. This requires there is a slug field in model definition. :param kind: a string specified which kind of dict tranformer should be called. :return: a dict or None.
def tee2(process, filter):
    """Read lines from process.stderr and echo them to sys.stderr.

    The 'filter' is a callable which is invoked for every line,
    receiving the line as argument.  If the filter returns True, the
    line is echoed to sys.stderr.
    """
    while True:
        line = process.stderr.readline()
        if line:
            if sys.version_info[0] >= 3:
                # NOTE(review): `decode` is presumably a bytes->str
                # helper defined elsewhere in this module -- confirm.
                line = decode(line)
            # Filter on the stripped line but echo the original,
            # newline included.
            stripped_line = line.rstrip()
            if filter(stripped_line):
                sys.stderr.write(line)
        elif process.returncode is not None:
            # EOF and the process has been reaped (returncode is set by
            # a poll()/wait() elsewhere): close the pipe and stop.
            process.stderr.close()
            break
Read lines from process.stderr and echo them to sys.stderr. The 'filter' is a callable which is invoked for every line, receiving the line as argument. If the filter returns True, the line is echoed to sys.stderr.
def get_symmetry_from_database(hall_number):
    """Return symmetry operations corresponding to a Hall symbol.

    The Hall symbol is given by the serial number in between 1 and 530.
    The symmetry operations are given by a dictionary whose keys are
    'rotations' and 'translations'.  If it fails, None is returned.
    """
    _set_no_error()

    # Pre-allocate the C-side output buffers; 192 operations is the
    # capacity the binding expects.
    rotations = np.zeros((192, 3, 3), dtype='intc')
    translations = np.zeros((192, 3), dtype='double')
    num_sym = spg.symmetry_from_database(rotations, translations, hall_number)
    _set_error_message()

    if num_sym is None:
        return None
    else:
        # Trim the pre-allocated buffers to the actual operation count
        # and return C-contiguous copies.
        return {'rotations': np.array(rotations[:num_sym], dtype='intc', order='C'),
                'translations': np.array(translations[:num_sym], dtype='double', order='C')}
Return symmetry operations corresponding to a Hall symbol. The Hall symbol is given by the serial number in between 1 and 530. The symmetry operations are given by a dictionary whose keys are 'rotations' and 'translations'. If it fails, None is returned.
def _phi_node_contains(self, phi_variable, variable): if self.variable_manager[self.function.addr].is_phi_variable(phi_variable): return variable in self.variable_manager[self.function.addr].get_phi_subvariables(phi_variable) return False
Checks if `phi_variable` is a phi variable, and if it contains `variable` as a sub-variable. :param phi_variable: :param variable: :return:
def save_workspace(self, filename=''):
    r"""Saves all the current Projects to a 'pnm' file.

    Parameters
    ----------
    filename : string, optional
        If no filename is given, a name is generated using the current
        time and date.  The name may include a relative or absolute
        path; the 'pnm' extension is applied by ``_parse_filename``.
    """
    if filename == '':
        # Timestamped default, e.g. workspace_2020Jan01_1030AM.
        filename = 'workspace' + '_' + time.strftime('%Y%b%d_%H%M%p')
    filename = self._parse_filename(filename=filename, ext='pnm')
    # Pickle every project in the workspace, keyed by project name.
    d = {}
    for sim in self.values():
        d[sim.name] = sim
    with open(filename, 'wb') as f:
        pickle.dump(d, f)
r""" Saves all the current Projects to a 'pnm' file Parameters ---------- filename : string, optional If no filename is given, a name is generated using the current time and date. See Notes for more information on valid file names. See Also -------- save_project Notes ----- The filename can be a string such as 'saved_file.pnm'. The string can include absolute path such as 'C:\networks\saved_file.pnm', or can be a relative path such as '..\..\saved_file.pnm', which will look 2 directories above the current working directory. It can also be a path object object such as that produced by ``pathlib`` or ``os.path`` in the Python standard library.
def _output_validators(self): if self._walk_for_type('Boolean'): print("from .validators import boolean") if self._walk_for_type('Integer'): print("from .validators import integer") vlist = self.override.get_validator_list() for override in vlist: if override.startswith('common/'): override = override.lstrip('common/') filename = "validators" else: filename = "%s_validators" % self.filename print("from .%s import %s" % (filename, override))
Output common validator types based on usage.
def create_working_dir(config, prefix):
    """Create a fresh temporary directory, based on the given prefix.

    Returns the new path, always terminated with the path separator.
    """
    base = config.get("Execution", "directory")
    finalpath = tempfile.mkdtemp(prefix=(prefix or 'opensubmit') + '_', dir=base)
    if not finalpath.endswith(os.sep):
        finalpath += os.sep
    logger.debug("Created fresh working directory at {0}.".format(finalpath))
    return finalpath
Create a fresh temporary directory, based on the fiven prefix. Returns the new path.
def ball_pick(n, d, rng=None):
    """Return cartesian vectors uniformly picked on the unit ball in an
    arbitrary number of dimensions, via rejection sampling in the
    enclosing cube of side 2.

    Parameters
    ----------
    n : integer
        Number of points to return.
    d : integer
        Number of dimensions of the space in which the ball lives.

    Returns
    -------
    r : array, shape (n, d)
        Sample cartesian vectors.
    """
    # Accept a sample iff it lies strictly inside the unit sphere.
    return rejection_pick(L=2.0, n=n, d=d,
                          valid=lambda r: vector_mag_sq(r) < 1.0,
                          rng=rng)
Return cartesian vectors uniformly picked on the unit ball in an arbitrary number of dimensions. The unit ball is the space enclosed by the unit sphere. The picking is done by rejection sampling in the unit cube. In 3-dimensional space, a fraction `\pi / 6 \approx 0.52` of the sampled points are valid. Parameters ---------- n: integer Number of points to return. d: integer Number of dimensions of the space in which the ball lives Returns ------- r: array, shape (n, d) Sample cartesian vectors.
def update_colors(self, colors):
    """Update the colors.

    Converts ``colors`` to a uint8 array, uploads it to the color VBO
    and unbinds the buffer.
    """
    data = np.array(colors, dtype=np.uint8)
    self._vbo_c.set_data(data)
    self._vbo_c.unbind()
Update the colors
def run(self):
    """The main run loop for the sender thread."""
    log.debug("Starting Kafka producer I/O thread.")

    # Main loop: keep polling until close() clears self._running.
    while self._running:
        try:
            self.run_once()
        except Exception:
            log.exception("Uncaught error in kafka producer I/O thread")

    log.debug("Beginning shutdown of Kafka producer I/O thread, sending"
              " remaining records.")

    # Graceful drain: unless force-closed, keep iterating until all
    # accumulated batches are sent and no requests are in flight.
    while (not self._force_close
           and (self._accumulator.has_unsent()
                or self._client.in_flight_request_count() > 0)):
        try:
            self.run_once()
        except Exception:
            log.exception("Uncaught error in kafka producer I/O thread")

    if self._force_close:
        # Hard close: drop anything that has not been sent yet.
        self._accumulator.abort_incomplete_batches()

    try:
        self._client.close()
    except Exception:
        log.exception("Failed to close network client")

    log.debug("Shutdown of Kafka producer I/O thread has completed.")
The main run loop for the sender thread.
def arg_bool(name, default=False):
    """Fetch a query argument, as a boolean.

    An absent/empty argument yields ``default``; otherwise the value is
    truthy only if it appears in ``BOOL_TRUISH``.
    """
    raw = request.args.get(name, '')
    return (raw in BOOL_TRUISH) if raw else default
Fetch a query argument, as a boolean.
def files_in_dir(dirpath, wildcard="*", startpath=None):
    """Generate a list of files from a specific directory.

    dirpath: required directory
    wildcard: mask for files
    startpath: start for relative path

    Raises when the directory does not exist or contains no matching
    files.
    """
    import glob
    if startpath is not None:
        completedirpath = os.path.join(startpath, dirpath)
    else:
        completedirpath = dirpath

    if not os.path.exists(completedirpath):
        logger.error('Wrong path: ' + completedirpath)
        raise Exception('Wrong path : ' + completedirpath)
    logger.info('completedirpath = ' + completedirpath)

    filelist = glob.glob(os.path.join(completedirpath, wildcard))
    if not filelist:
        logger.error('No required files in path: ' + completedirpath)
        raise Exception('No required file in path: ' + completedirpath)
    return filelist
Function generates list of files from specific dir files_in_dir(dirpath, wildcard="*.*", startpath=None) dirpath: required directory wildcard: mask for files startpath: start for relative path Example files_in_dir('medical/jatra-kiv','*.dcm', '~/data/')
def delete_kb(kb_name):
    """Delete given kb from database.

    :param kb_name: knowledge base name
    """
    kb = models.KnwKB.query.filter_by(name=kb_name).one()
    db.session.delete(kb)
Delete given kb from database. :param kb_name: knowledge base name
def _generate_author_query(self, author_name):
    """Generates a query handling specifically authors.

    Notes:
        The match query is generic enough to return many results. Then,
        using the filter clause we truncate these so that we imitate
        legacy's behaviour on returning more "exact" results. E.g.
        searching for `Smith, John` shouldn't return papers of
        'Smith, Bob'.
    """
    name_variations = [name_variation.lower()
                       for name_variation
                       in generate_minimal_name_variations(author_name)]

    if author_name_contains_fullnames(author_name):
        # Full names available: for every ordered pair of variations,
        # require both a name-variation term and an AND-match of the
        # paired variation on the author field.
        specialized_author_filter = [
            {
                'bool': {
                    'must': [
                        {
                            'term': {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: names_variation[0]}
                        },
                        generate_match_query(
                            ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author'],
                            names_variation[1],
                            with_operator_and=True
                        )
                    ]
                }
            } for names_variation
            in product(name_variations, name_variations)
        ]
    else:
        # Only a lastname (and possibly initials): a term on any single
        # name variation suffices.
        specialized_author_filter = [
            {'term': {ElasticSearchVisitor.AUTHORS_NAME_VARIATIONS_FIELD: name_variation}}
            for name_variation in name_variations
        ]

    # Broad match on the author name, truncated by the variation filter.
    query = {
        'bool': {
            'filter': {
                'bool': {
                    'should': specialized_author_filter
                }
            },
            'must': {
                'match': {
                    ElasticSearchVisitor.KEYWORD_TO_ES_FIELDNAME['author']: author_name
                }
            }
        }
    }

    return generate_nested_query(ElasticSearchVisitor.AUTHORS_NESTED_QUERY_PATH, query)
Generates a query handling specifically authors. Notes: The match query is generic enough to return many results. Then, using the filter clause we truncate these so that we imitate legacy's behaviour on returning more "exact" results. E.g. Searching for `Smith, John` shouldn't return papers of 'Smith, Bob'. Additionally, doing a ``match`` with ``"operator": "and"`` in order to be even more exact in our search, by requiring that ``full_name`` field contains both
def parse_file(infile, exit_on_error=True):
    """Parse a comma-separated file with columns "ra,dec,magnitude".

    Returns the three columns as 1-D arrays.  On read failure, either
    logs and exits (default) or re-raises the IOError.
    """
    try:
        columns = np.atleast_2d(
            np.genfromtxt(infile, usecols=[0, 1, 2], delimiter=',')
        ).T
        a, b, mag = columns
    except IOError as e:
        if not exit_on_error:
            raise e
        logger.error("There seems to be a problem with the input file, "
                     "the format should be: RA_degrees (J2000), Dec_degrees (J2000), "
                     "Magnitude. There should be no header, columns should be "
                     "separated by a comma")
        sys.exit(1)
    return a, b, mag
Parse a comma-separated file with columns "ra,dec,magnitude".
def get_candidate_election(self, election):
    """Get the CandidateElection linking this candidate to ``election``.

    :raises CandidateElection.DoesNotExist: if no such link exists
        (standard Django ``get`` behaviour).
    """
    return CandidateElection.objects.get(candidate=self, election=election)
Get a CandidateElection.
def unused_keys(self):
    """Lists all keys which are present in the ConfigTree but which have
    not been accessed, using dotted paths for nested subtrees.
    """
    unused = set()
    for key, child in self._children.items():
        if isinstance(child, ConfigNode):
            # Leaf node: report it only if it was never read.
            if not child.has_been_accessed():
                unused.add(key)
        else:
            # Subtree: recurse and prefix each result with our key.
            unused.update(key + '.' + sub for sub in child.unused_keys())
    return unused
Lists all keys which are present in the ConfigTree but which have not been accessed.
def getApplicationsErrorNameFromEnum(self, error):
    """Returns a string for an applications error"""
    return self.function_table.getApplicationsErrorNameFromEnum(error)
Returns a string for an applications error
def each(iterable = None, *, name = None, metric = call_default):
    """Measure time elapsed to produce each item of an iterable

    :arg iterable: any iterable
    :arg function metric: f(name, 1, time)
    :arg str name: name for the metric
    """
    if iterable is not None:
        # Direct usage: wrap the iterable immediately.
        return _do_each(iterable, name, metric)
    # Bare/parameterised decorator usage: return a decorator.
    return _each_decorator(name, metric)
Measure time elapsed to produce each item of an iterable :arg iterable: any iterable :arg function metric: f(name, 1, time) :arg str name: name for the metric
def decipher(self, string):
    """Decipher string using FracMorse cipher according to initialised key.

    Example::

        plaintext = FracMorse('ROUNDTABLECFGHIJKMPQSVWXYZ').decipher(ciphertext)

    :param string: The string to decipher.
    :returns: The deciphered string.
    """
    # Map each key character to its fractionated-Morse table symbol.
    mapping = dict(zip(self.key, self.table))
    ptext = "".join(mapping[ch] for ch in string.upper())
    return self.demorse(ptext)
Decipher string using FracMorse cipher according to initialised key. Example:: plaintext = FracMorse('ROUNDTABLECFGHIJKMPQSVWXYZ').decipher(ciphertext) :param string: The string to decipher. :returns: The deciphered string.
def map(self, mapper: Callable[[Any], Any]) -> 'List':
    """Map a function over a List.

    If calling ``mapper`` on the elements raises TypeError (e.g. the
    function expects more than one argument), fall back to partially
    applying ``mapper`` to each element instead, enabling curried use.

    NOTE(review): the fallback also masks TypeErrors raised *inside* a
    correctly-arity mapper -- confirm this is intended.
    """
    try:
        ret = List.from_iterable([mapper(x) for x in self])
    except TypeError:
        ret = List.from_iterable([partial(mapper, x) for x in self])
    return ret
Map a function over a List.
def constraint(self, n=-1, fid=0):
    """Obtain the set of orthogonal equations that make the solution of
    the rank deficient normal equations possible.

    :param n: index of a single constraint equation; the default (-1,
        or any out-of-range value) returns them all.  Selecting an
        individual equation is not yet implemented.
    :param fid: the id of the sub-fitter (numerical)
    """
    c = self._getval("constr", fid)
    # Out-of-range n (including the default -1) returns everything;
    # selecting a specific equation raises until implemented.
    if n < 0 or n > self.deficiency(fid):
        return c
    else:
        raise RuntimeError("Not yet implemented")
Obtain the set of orthogonal equations that make the solution of the rank deficient normal equations possible. :param fid: the id of the sub-fitter (numerical)
def parse_name(cls, name: str, default: T = None) -> T:
    """Parse specified name for IntEnum; return default if not found.

    Matching is case-insensitive; empty/None names yield ``default``.
    """
    if not name:
        return default
    lowered = name.lower()
    for member in cls:
        if member.name.lower() == lowered:
            return member
    return default
Parse specified name for IntEnum; return default if not found.
def getWeb(url, isFeed):
    """Download url and parse it with lxml.

    If "isFeed" is True returns lxml.etree else, returns lxml.html.

    NOTE: sets a process-wide default socket timeout of 300 seconds as
    a side effect.
    """
    socket.setdefaulttimeout(300)
    loadedWeb = urllib2.build_opener()
    # Apply the module's standard request headers (user agent etc.).
    loadedWeb.addheaders = getHeaders()
    if isFeed:
        web = etree.parse(loadedWeb.open(url))
    else:
        web = html.parse(loadedWeb.open(url))
    return web
Download url and parse it with lxml. If "isFeed" is True returns lxml.etree else, returns lxml.html