content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def simple_computation(maximum_terms: int = None, configuration_of=None):
    """Generate a simple multi-operand arithmetic computation.

    Fraction terms are excluded because division results are already
    rendered as fractions, and parentheses (random_term's ``expression``
    option) are disabled because they can lead to overflow.

    :param maximum_terms: optional override for the configured maximum
        number of operands
    :param configuration_of: name of the type_config entry to use;
        defaults to 'simple_computation'
    :return: Problem object
    """
    cfg_name = configuration_of if configuration_of else 'simple_computation'
    cfg = combine_configurations(type_config[cfg_name], global_config)
    if maximum_terms:
        cfg['maximum_terms'] = maximum_terms
    cfg['symbol'] = process_raw_symbol(cfg['symbol'])

    term_kwargs = {
        'interval': cfg['interval'],
        'denominator_interval': cfg['denominator_interval'],
        'float_precision': cfg['float_precision'],
        'frac': False,
        'expression': False,
        'symbol': cfg['symbol'],
    }

    # First operand, then a random operator + operand pair per extra term.
    extra_terms = randint(2, cfg['maximum_terms'])
    expr_text = str(random_term(**term_kwargs))
    for _ in range(extra_terms):
        expr_text += choice(['+', '-', '*', '/']) + str(random_term(**term_kwargs))

    # Symbolic problems keep the exact value; numeric ones are rounded.
    if cfg['symbol']:
        answer = sympify(expr_text)
    else:
        answer = sympify(expr_text).round(cfg['float_precision'])
    question = sympify(expr_text, evaluate=False)
    return Problem(question, answer)
0dae4396f74b9a254d0c882c022018c7a69d69cd
3,647,884
def add_torrents_from_folder(path, transmission_url, torrentleech_username, torrentleech_password, torrentleech_rss_key):
    """Console script for media_server_utils.

    Thin CLI wrapper that delegates to :func:`core.add_torrents_from_folder`
    and always exits successfully.
    """
    core.add_torrents_from_folder(
        path,
        transmission_url,
        torrentleech_username,
        torrentleech_password,
        torrentleech_rss_key,
    )
    return 0
ac058ff014c76a3af4054076961b52011bc7329c
3,647,885
def cut_graph(G, w):
    """
    Cut a graph down to a given depth (hop count) from node 0.

    Inputs:
    - G     Input graph (nodes need not be 0..N-1)
    - w     Depth to cut to

    Output:
    - cut_G Cut graph containing only nodes within depth w of node 0
    """
    # Copy the initial graph so the input is not mutated
    cut_G = G.copy()

    # Single-source BFS once: the original called shortest_path_length per
    # node, repeating the same search N times, and raised NetworkXNoPath
    # for nodes unreachable from node 0.
    lengths = nx.shortest_path_length(G, source=0)

    for node in list(G.nodes):
        # Remove nodes deeper than w; unreachable nodes (no entry in
        # `lengths`) are removed as well.
        if lengths.get(node, float('inf')) > w:
            cut_G.remove_node(node)
    return cut_G
314f804d2d42146e1dcfb1f4a5373f92a4fe2f17
3,647,886
def archive_filter_search(articles_qs):
    """ Filter the given queryset for the archive hook.

    Excludes articles that have an update whose article is already in the
    published stage, and returns the filtered queryset for rendering.
    """
    filtered = articles_qs.exclude(updates__article__stage=STAGE_PUBLISHED)
    return filtered
2f5783de98110b9b74cabcf571a5919b409d1479
3,647,887
from typing import List


def init_plotscript(config, markets: List, startup_candles: int = 0):
    """
    Initialize objects needed for plotting

    :return: Dict with candle (OHLCV) data, trades and pairs
    """
    # Explicit pair list takes precedence over the exchange whitelist.
    raw_pairs = config['pairs'] if "pairs" in config else config['exchange']['pair_whitelist']
    pairs = expand_pairlist(raw_pairs, markets)

    # Set timerange to use
    timerange = TimeRange.parse_timerange(config.get('timerange'))

    data = load_data(
        datadir=config.get('datadir'),
        pairs=pairs,
        timeframe=config['timeframe'],
        timerange=timerange,
        startup_candles=startup_candles,
        data_format=config.get('dataformat_ohlcv', 'json'),
    )

    if startup_candles and data:
        min_date, max_date = get_timerange(data)
        logger.info(f"Loading data from {min_date} to {max_date}")
        timerange.adjust_start_if_necessary(
            timeframe_to_seconds(config['timeframe']), startup_candles, min_date)

    filename = config.get('exportfilename')
    no_trades = bool(config.get('no_trades', False))
    if not no_trades and config['trade_source'] == 'file':
        # A missing backtest export means trades simply cannot be loaded.
        if not filename.is_dir() and not filename.is_file():
            logger.warning("Backtest file is missing skipping trades.")
            no_trades = True

    try:
        trades = load_trades(
            config['trade_source'],
            db_url=config.get('db_url'),
            exportfilename=filename,
            no_trades=no_trades,
            strategy=config.get('strategy'),
        )
    except ValueError as e:
        raise OperationalException(e) from e
    if not trades.empty:
        trades = trim_dataframe(trades, timerange, 'open_date')

    return {"ohlcv": data,
            "trades": trades,
            "pairs": pairs,
            "timerange": timerange,
            }
220118a5d438227932ba9473471d59ff03b44412
3,647,888
from typing import List
from typing import Callable


def get_one(data: List[LogEntry], filterfun: Callable) -> LogEntry:
    """Get a single entry and assert that after filtering only a single
    entry remains."""
    filtered = list(filter(filterfun, data))
    if len(filtered) == 1:
        return filtered[0]
    raise ValueError(f"Entries not unique after filtering: {filtered}")
ece1b0b9c654f85eda89e2fc8736c84b4a2ca9ca
3,647,889
from typing import Optional


def create(env_name: str,
           episode_length: int = 1000,
           action_repeat: int = 1,
           auto_reset: bool = True,
           batch_size: Optional[int] = None,
           **kwargs) -> Env:
    """Creates an Env with a specified brax system.

    The base environment is looked up by name and then wrapped, in order,
    with episode-length, batching, and auto-reset wrappers as requested.
    """
    env = _envs[env_name](**kwargs)
    # Wrappers are applied innermost-first; order matters.
    if episode_length is not None:
        env = wrappers.EpisodeWrapper(env, episode_length, action_repeat)
    if batch_size:
        env = wrappers.VectorWrapper(env, batch_size)
    if auto_reset:
        env = wrappers.AutoResetWrapper(env)
    return env  # type: ignore
1d7e8bf147843799f01e7a894b86fce74ca86da7
3,647,891
def from_software_version(software_version):
    """ Returns the product version dependant limits_constants.

    This is based on the running software version on the product and can
    change based on up when you ask a cluster if upgrading.

    Args:
        software_version: (str) software version ex "3.1.2.0" or "2.2.7"
    """
    limits = _get_limits(software_version=software_version)
    return limits
fc978610a6aa55a956bb849cae107bc134934f55
3,647,892
def _from_parse_feature(parse_feature):
    """Convert a single feature spec to a ColumnSchema."""
    # FixedLenFeature: dense value with a known shape.
    if isinstance(parse_feature, tf.FixedLenFeature):
        rep = FixedColumnRepresentation(parse_feature.default_value)
        return ColumnSchema(parse_feature.dtype, parse_feature.shape, rep)

    # FixedLenSequenceFeature: explicitly unsupported.
    if isinstance(parse_feature, tf.FixedLenSequenceFeature):
        raise ValueError('DatasetSchema does not support '
                         'FixedLenSequenceFeature yet.')

    # VarLenFeature: variable-length list of unknown size.
    if isinstance(parse_feature, tf.VarLenFeature):
        rep = ListColumnRepresentation()
        return ColumnSchema(parse_feature.dtype, [None], rep)

    # SparseFeature: value/index key pair.
    if isinstance(parse_feature, tf.SparseFeature):
        index_field = SparseIndexField(name=parse_feature.index_key,
                                       is_sorted=parse_feature.already_sorted)
        rep = SparseColumnRepresentation(
            value_field_name=parse_feature.value_key,
            index_fields=[index_field])
        return ColumnSchema(parse_feature.dtype, [parse_feature.size], rep)

    raise ValueError('Cannot interpret feature spec {} with type {}'.format(
        parse_feature, type(parse_feature)))
9c7034c3b7663a0c49dc69dbf8507f90f4cacf83
3,647,893
import json


def conditional_patch_resource(
    service_account_json, base_url, project_id, cloud_region, dataset_id, fhir_store_id
):
    """
    If a resource is found based on the search criteria specified in the
    query parameters, updates part of that resource by applying the
    operations specified in a JSON Patch document.
    """
    location_url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region)

    # The search query in this request updates all Observations
    # if the subject of the Observation is a particular patient.
    observation_path = "{}/datasets/{}/fhirStores/{}/fhir/Observation".format(
        location_url, dataset_id, fhir_store_id
    )

    # Make an authenticated API request
    session = get_session(service_account_json)
    headers = {"Content-Type": "application/json-patch+json"}

    patch_ops = [
        {
            "op": "replace",
            "path": "/valueQuantity/value",
            # Sets the BPM for all matching Observations to 80. This
            # is the portion of the request being patched.
            "value": 80,
        }
    ]

    # The search query is passed in as a query string parameter.
    params = {"identifier": "my-code-system|ABC-12345"}

    response = session.patch(
        observation_path, headers=headers, params=params, data=json.dumps(patch_ops)
    )
    response.raise_for_status()
    print(response.url)

    resource = response.json()
    print(
        "Conditionally patched all Observations with the "
        "identifier 'my-code-system|ABC-12345' to use a BPM of 80."
    )
    print(json.dumps(resource, indent=2))
    return resource
e04eada0184d38c8d0b1ec4620fd5ef1f0bc90d5
3,647,894
def cal_big_F(p, f):
    """
    calculate finite strain big F for linearized form

    not fully tested

    :param p: pressure
    :param f: small f
    :return: big F
    """
    # F = p / (3 f (1 + 2f)^(5/2))
    denominator = 3. * f * np.power(1. + 2. * f, 2.5)
    return p / denominator
2d98b9e525837cd8d6dd17266f92f959743ad8f2
3,647,895
from typing import List


def multiply_aug(data_aug: List[str], factor: int) -> List[str]:
    """
    Naively repeat each Augmentation-occupancies block of a CHGCAR
    ``factor`` times, renumbering the block headers sequentially.

    The original idea here was to use to to speed up some vasp calculations
    for supercells by initializing the entire CHGCAR file. The current code
    does not deal with transformation of the Augmentation charges after
    regridding — a real working implementation will require analysis of the
    PAW projection operators. Even then, the speed up will be minimal due
    to VASP's internal minimization algorithms.

    Args:
        data_aug: The original augmentation data from a CHGCAR
        factor: The multiplication factor (some integer number of times it
            gets repeated)

    Returns:
        List of strings for each line of the Augmentation data.
    """
    res: List[str] = []
    cur_block: List[str] = []
    cnt = 0

    def _flush() -> None:
        # Emit the current block `factor` times; each copy gets a freshly
        # renumbered header line (the last token — the value count — is kept).
        nonlocal cnt
        for _ in range(factor):
            cnt += 1
            cur_block[
                0
            ] = f"augmentation occupancies{cnt:>4}{cur_block[0].split()[-1]:>4}\n"
            res.extend(cur_block)

    for ll in data_aug:
        if "augmentation" in ll:
            # A new header line starts the next block; flush the previous one.
            if cur_block:
                _flush()
            cur_block = [ll]
        else:
            cur_block.append(ll)
    # Flush the trailing block. The original used an unguarded for-else and
    # raised IndexError on empty input; guard fixes that edge case.
    if cur_block:
        _flush()
    return res
2baef4c98dbb83f1a08f11e58f3c4cf82ad8ea64
3,647,896
def _parse_instance_chain(chain_str):
    """Resolve a dotted attribute chain to the object it names.

    The first dot-separated segment is looked up as an instance name in
    ``session['instances']``; each remaining segment is resolved with
    ``getattr`` on the previous result.
    """
    instance_name, *attr_names = chain_str.split('.')
    obj = session['instances'][instance_name]
    for attr_name in attr_names:
        obj = getattr(obj, attr_name)
    return obj
531b78ee80f3b6437b885ef89b7f285e6cf6a8a5
3,647,897
import math


def epochs_lists(
    draw,
    start_time=math.inf,
    max_epochs=5,
    min_deme_size=FLOAT_EPS,
    max_deme_size=FLOAT_MAX,
):
    """
    A hypothesis strategy for creating lists of Epochs for a deme.

    Each epoch is returned as a dict with ``end_time``, ``start_size``,
    ``end_size``, ``cloning_rate`` and ``selfing_rate`` keys.

    :param float start_time: The start time of the deme.
    :param int max_epochs: The maximum number of epochs in the list.
    :param min_deme_size: Lower bound drawn for start/end sizes.
    :param max_deme_size: Upper bound drawn for start/end sizes.
    """
    assert max_epochs >= 2
    # Draw distinct epoch end times strictly below the deme's start time
    # (width=32: values representable as float32).
    times = draw(
        st.lists(
            st.floats(
                min_value=0,
                max_value=min(FLOAT_MAX, start_time),
                exclude_max=True,
                width=32,
            ),
            unique=True,
            min_size=1,
            max_size=max_epochs,
        )
    )
    # Epochs are ordered oldest-first (largest end_time first).
    times.sort(reverse=True)
    epochs = []
    for i, end_time in enumerate(times):
        start_size = draw(st.floats(min_value=min_deme_size, max_value=max_deme_size))
        if i == 0 and math.isinf(start_time):
            # An infinitely long first epoch must have constant size.
            end_size = start_size
        else:
            end_size = draw(st.floats(min_value=min_deme_size, max_value=max_deme_size))
        cloning_rate = draw(st.floats(min_value=0, max_value=1))
        # Constrain selfing so cloning + selfing stays <= 1.
        # NOTE(review): prec32 presumably rounds toward a float32-safe
        # bound — confirm against its definition.
        selfing_rate = draw(st.floats(min_value=0, max_value=prec32(1 - cloning_rate)))
        epochs.append(
            dict(
                end_time=end_time,
                start_size=start_size,
                end_size=end_size,
                cloning_rate=cloning_rate,
                selfing_rate=selfing_rate,
            )
        )
    return epochs
9eebece7ac1dc2f9ad6d13f7368de62a6db9433c
3,647,898
import torch


def mlp_layers(nch_input, nch_layers, b_shared=True, bn_momentum=0.1, dropout=0.0):
    """ [B, Cin, N] -> [B, Cout, N] or [B, Cin] -> [B, Cout]

    Build a flat list of (Conv1d|Linear) + BatchNorm1d + ReLU [+ Dropout]
    modules, one group per entry of `nch_layers`.

    :param nch_input: number of input channels/features
    :param nch_layers: output channel count for each layer group
    :param b_shared: if True use pointwise Conv1d (weights shared across
        points); if False use Linear
    :param bn_momentum: momentum for BatchNorm1d
    :param dropout: dropout probability; only applied in the Linear
        (non-shared) variant
    :return: flat list of torch.nn modules
    """
    layers = []
    last = nch_input
    # Unused loop index removed; `b_shared == False` replaced with the
    # idiomatic `not b_shared`.
    for outp in nch_layers:
        if b_shared:
            weights = torch.nn.Conv1d(last, outp, 1)
        else:
            weights = torch.nn.Linear(last, outp)
        layers.append(weights)
        layers.append(torch.nn.BatchNorm1d(outp, momentum=bn_momentum))
        layers.append(torch.nn.ReLU())
        if not b_shared and dropout > 0.0:
            layers.append(torch.nn.Dropout(dropout))
        last = outp
    return layers
8085b99b828fcbadee191d90737d582f7dd9ce73
3,647,899
def path_content_to_string(path):
    """Convert contents of a directory recursively into a string for easier
    comparison.

    Each entry becomes a "relative_path,type,size,hash" line; the lines are
    sorted and joined with newlines.
    """
    prefix_len = len(path + sep)
    lines = []
    for root, dirs, files in walk(path):
        for dir_ in dirs:
            full_path = join(root, dir_)
            # Directories get a fixed size of 0 and hash of "0".
            lines.append("{},{},{},{}".format(full_path[prefix_len:], "dir", 0, "0"))
        for filename in files:
            full_path = join(root, filename)
            kind = "file" if isfile(full_path) else "dir"
            lines.append("{},{},{},{}".format(
                full_path[prefix_len:], kind, getsize(full_path), get_md5(full_path)))
    return "\n".join(sorted(lines))
10d350eb866642f52e350b76f01de2b7e0ff6a5d
3,647,901
def get_evts(rslt, a_params):
    """Return start and end times of candidate replay events."""
    # Smoothed instantaneous firing rate averaged over place cells.
    pc_spks = rslt.spks[:, :rslt.p['N_PC']]
    pc_rate = smooth(
        pc_spks.sum(axis=1) / (rslt.dt * rslt.p['N_PC']), a_params['SMOOTH_FR'])

    # Index segments where the PC rate exceeds the detection threshold,
    # converted to time units.
    starts, ends = get_segments(pc_rate >= a_params['EVT_DTCN_TH'])
    starts = starts.astype(float) * rslt.dt
    ends = ends.astype(float) * rslt.dt

    # Merge events separated by too-short gaps.
    if len(starts) > 0:
        starts, ends = remove_short_gaps(starts, ends, a_params['MIN_GAP_DUR'])

    # Drop events that are too short.
    if len(starts) > 0:
        starts, ends = remove_short_evts(starts, ends, a_params['MIN_EVT_DUR'])

    # Drop events beginning before the minimum start time.
    if len(starts):
        keep = starts > a_params['MIN_START']
        starts = starts[keep]
        ends = ends[keep]

    # Drop a final event that runs into the end of the simulation.
    if len(ends) and ends[-1] >= rslt.ts[-1]:
        starts = starts[:-1]
        ends = ends[:-1]

    return starts, ends
c8c6867588d72f97dd687dbe17b7494bc534fa1e
3,647,902
from typing import List
from typing import Dict


def get_threatfeed_command(client: Client, threatfeed_id: int = None):
    """
    Retrieves the current list of threatFeed objects already configured in the system
    :param threatfeed_id: The id of the ThreatFeed object.
    :param client: Vectra Client
    """
    suffix = f'threatFeeds/{threatfeed_id}' if threatfeed_id else 'threatFeeds'
    raw_response = client.http_request(url_suffix=suffix)
    if demisto.get(raw_response, 'meta.count') == 0:
        return "Couldn't find any results", {}, raw_response

    res = raw_response.get('threatFeeds')  # type: ignore
    # A single feed comes back as a dict; a collection is sorted by id.
    if isinstance(res, List):
        feeds: List[Dict] = sorted(res, key=lambda h: h.get('id'))  # type: ignore
    else:
        feeds = [res]
    for feed in feeds:
        feed.update(feed.get('defaults'))  # type: ignore

    headers = ['id', 'name', 'certainty', 'category', 'duration', 'indicatorType']
    readable_output = tableToMarkdown(name='Rules table', t=feeds, headers=headers)

    context = [
        createContext(
            {
                'ID': feed.get('id'),
                'Name': feed.get('name'),
                'Duration': feed.get('duration'),
                'Category': feed.get('category'),
                'Certainty': feed.get('certainty'),
                'Data': feed.get('data'),
                'IndicatorType': feed.get('indicatorType'),
            },
            removeNull=True)
        for feed in feeds
    ]
    outputs = {'Vectra.ThreatFeed(val.ID==obj.ID)': context}
    return readable_output, outputs, raw_response
7f0b37a724720aea73170d3575ed5b08dec7ea85
3,647,903
def email_subscribe_pending_confirm(hexdomain):
    """Send a confirmation email for a user."""
    domain = tools.parse_domain(hexdomain)
    if domain is None:
        flask.abort(400, 'Malformed domain or domain not represented in hexadecimal format.')

    hide_noisy = bool(flask.request.form.get('hide_noisy'))
    email_address = flask.request.form['email_address']

    # Blank address: bounce back to the subscribe form.
    if email_address.strip() == '':
        return flask.redirect('/email/subscribe/{}/0?hide_noisy={}'.format(
            hexdomain, hide_noisy
        ))

    # Generate a verification link and the confirmation email body.
    verify_code = tools.random_id()
    verify_url = flask.request.url_root + 'email/verify/{}'.format(verify_code)
    email_body = email_tools.render_email(
        'confirm.html',
        domain=domain,
        verify_url=verify_url
    )

    # Record the pending subscription, then send the email.
    repository.propose_subscription(verify_code, email_address, domain, hide_noisy)
    emailer.send(email_address, 'Please verify your subscription', email_body)

    return flask.render_template('www/email/pending_verify.html', domain=domain)
9932554a3349e3cf1ecd958d15dd762f787f61c7
3,647,904
def getTrackIds(sp, username, playlist, offset=0):
    """ Returns the ids of the tracks contained in a playlist

    :param sp: A spotipy.Spotify object to be used for the request.
    :param username: The username of the user who's playlists you want the
        retrieve.
    :param playlist: Name of the playlist from wich the tracks are retrieved.
    :param offset: Do not worry about this parameter, it is used for recursion.
    :returns: A list containing all the ids of the tracks that are in the
        playlist.
    """
    limit = 100
    fields = "items(track(id)), total"
    api_response = sp.user_playlist_tracks(username, playlist["id"], fields,
                                           limit=limit, offset=offset)
    track_ids = [entry["track"]["id"] for entry in api_response["items"]]
    # Recurse into the next page while more tracks remain.
    if api_response["total"] > limit + offset:
        track_ids.extend(getTrackIds(sp, username, playlist, offset + limit))
    return track_ids
5b4e621022f49137b7fd4547bf5ab4efe92b4515
3,647,905
def children_of_head(element: Element):
    """
    get descendant elements of the head element

    (The original docstring said "body", but the XPath targets ``//head``.)

    :param element: parsed document element, or None
    :return: descendants of <head>, or [] if the element/head is absent
    """
    if element is None:
        return []
    head_xpath = '//head'
    head_element = element.xpath(head_xpath)
    if head_element:
        head_element.__class__ = Element
        return descendants(head_element, True)
    return []
90b47d1c0c3f04231ea5dade3f7e9288339eef71
3,647,906
def network(name, nodes):
    """Build a NetworkMeta from a name and its node list.

    nodes: [ NodeMeta, ... ]
    """
    meta = NetworkMeta(name=name, nodes=nodes)
    return meta
5c0394ae2a31b83ac6889a4b973f51c1cdb1a0d9
3,647,907
def condensed_to_cosine(condensed_format):
    """Get mhd direction cosine for this condensed format axis"""
    ax = Axis.from_condensed_format(condensed_format)
    return permutation_to_cosine(ax.dim_order, ax.dim_flip)
25b3f0d63a84fa687bf4b238b53288fb8f64918b
3,647,908
def get_plants_for_species(item):
    """Get list of plants for a species.

    ``item`` is a dict with ``name`` and ``link`` keys (a Wikipedia article
    reference). Results are cached on disk as JSON per species name.
    Returns a list of dicts, or None for an empty/invalid ``item``.
    """
    if item is None or not item or item['name'] is None:
        return

    @cached('species_list_{}.json'.format(item['name']), directory='../../data/wikipedia')
    def get():
        def table(dom):
            # We need to switch to table format - the wikipedia articles
            # are inconsistent.
            rows = dom.find('.mw-parser-output .wikitable tr')
            if not rows:
                return
            # First row holds the column headings.
            headings = [h.text.strip() for h in rows[0]]
            for row in rows[1:]:
                row_data = {}
                tds = row.findall('td')
                if tds is None:
                    continue
                for i, td in enumerate(tds):
                    try:
                        row_data[headings[i]] = td.text or None
                    except IndexError:
                        # Row has more cells than headings; skip extras.
                        continue
                data.append(row_data)

        data = []
        url = 'https://en.wikipedia.org{}'.format(item['link'])
        _, dom = get_dom(url)
        # Try to be specific, but broaden scope if none found.
        if 'bamboo' in item['name']:
            table(dom)
        else:
            # Prefer list markup; fall back through broader selectors.
            links = dom.find('.mw-parser-output ul li a')
            if not links:
                links = dom.find('.mw-parser-output ol li a')
            if not links:
                links = dom.find('.mw-parser-output li a')
            if links:
                for link in links:
                    if link.text is None:
                        continue
                    # Reference links embedded within the lists.
                    if any([
                        # External link is invalid
                        link.get('href', '').startswith('http'),
                        # Anchors, invalid link
                        link.get('href', '').startswith('#'),
                        # Not real links/text
                        link.text.startswith('['),
                        link.text == '^',
                        link.text.startswith('\\'),
                    ]):
                        continue
                    data.append(dict(name=link.text, link=link.get('href')))
            else:
                # No usable lists at all: fall back to table parsing.
                table(dom)
        return data

    return get()
ed9522fd97ac101a6040f0485d06b1b88a834060
3,647,909
def check_password_hash(password, password_hash, salt, N=1 << 14, r=8, p=1, buflen=64):
    """
    Verify that *password*, hashed with *salt* and the given scrypt
    parameters, matches *password_hash* (constant-time comparison).

    Args:
        - ``password``: The password to perform check on.

    Returns:
        - ``bool``
    """
    expected = generate_password_hash(password, salt, N, r, p, buflen)
    return safe_str_cmp(password_hash, expected)
1e3a75235b11c45746cabf03dcf05b88e610c02f
3,647,910
def _dB_calc(J_field, x, y, z):
    """
    Calculates the magnetic field at a point due to a current.

    Args:
        J_field (VectorField): Vector field describing the current that the
            magnetic field is generated from.
        x: The x coordinate of the point in the magnetic field.
        y: The y coordinate of the point in the magnetic field.
        z: The z coordinate of the point in the magnetic field.

    Returns:
        tuple (u,v,w): A tuple with the magnitude of the magnetic field at
        the point (x,y,z).
    """
    B = (0, 0, 0)
    # Sum the contribution of every current element in the field.
    for coordinates, mag in J_field.vec_field.items():
        biot_savart_constant = 10 ** (-7)  # mu_0 / (4*pi) in SI units
        # Vector from the current element to the field point.
        distance = (x - coordinates[0], y - coordinates[1], z - coordinates[2])
        distanceMag = linalg.norm(distance)
        distanceUnit = (distance[0] / distanceMag, distance[1] / distanceMag, distance[2] / distanceMag)
        # NOTE(review): Biot-Savart is dB ∝ I·dl × r̂ / r², but this cross
        # product uses the element's *position* (`coordinates`) rather than a
        # current-direction vector, and `mag` (the current magnitude from the
        # dict value) is never used — this looks like a bug; confirm intent.
        crossProduct = np.cross(coordinates, distanceUnit)
        dB = (biot_savart_constant*crossProduct) / (distanceMag**2)
        B = np.add(B, dB)
    return B
0a66c59b4ece95c4f683a842b63e80d2a13a697a
3,647,911
import math


def create_mpl_subplot(images, color=True):
    """create mpl subplot with all images in list.

    Lays the images out on a near-square grid. even when the color is set
    to false it still seems to

    :param images: the list of images to plot
    :type images: cv2 image
    :param color: whether to plot in color or grayscale, defaults to True
    :type color: boolean
    :return: the complete plot
    :rtype: mpl plot
    """
    if not color:
        plt.set_cmap('gray')
    # Smallest square grid that fits every image.
    grid = math.ceil(math.sqrt(len(images)))
    for idx, img in enumerate(images, start=1):
        plt.subplot(grid, grid, idx)
        plt.imshow(img)
        plt.xticks([])
        plt.yticks([])
    return plt
259c352438c19c4639d78ef67aeb9af0271d6ade
3,647,912
def filter_subclasses(superclass, iter):
    """Returns an iterable of class obects which are subclasses of
    `superclass` filtered from a source iteration.

    :param superclass: The superclass to filter against
    :return: An iterable of classes which are subclasses of `superclass`
    """
    # NOTE: the parameter name `iter` shadows the builtin; kept for API
    # compatibility with existing callers.
    def _is_subclass(klass):
        return issubclass(klass, superclass)

    return filter(_is_subclass, iter)
2a891835379dfa3661d781d0c1860b650df013f0
3,647,913
def get_database_table_column_name(_conn: psycopg2.extensions.connection, _table: str) -> list:
    """
    Taken from: https://kb.objectrocket.com/postgresql/get-the-column-names-from-a-postgresql-table-with-the-psycopg2-python-adapter-756 # noqa
    defines a function that gets the column names from a PostgreSQL table.

    The query is parameterized: the original interpolated ``_table`` with
    ``str.format`` (SQL injection risk) and then wrapped the already-built
    string in ``sql.SQL(...).format(sql.Identifier(...))``, which was a
    no-op since the string contained no placeholder.

    Returns an empty (or partial) list if the query fails.
    """
    # declare an empty list for the column names
    columns = []

    # declare cursor objects from the connection
    col_cursor = _conn.cursor()

    try:
        # table_name is passed as a bound parameter, never interpolated
        col_cursor.execute(
            "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = %s;",
            (_table,),
        )

        # each row is a 1-tuple; keep only the column name string
        columns = [row[0] for row in col_cursor.fetchall()]

        # close the cursor object to prevent memory leaks
        col_cursor.close()

    except Exception as err:
        print("get_columns_names ERROR:", err)

    # return the list of column names
    return columns
9486e75792e2b7a63db589727a621fd648213487
3,647,914
import asyncio


def retry(*exceptions, retries=3, cooldown=5, verbose=True):
    """
    Decorate an async function to execute it a few times before giving up.

    Hopes that problem is resolved by another side shortly.

    Args:
        exceptions (Tuple[Exception]) : The exceptions expected during
            function execution
        retries (int): Number of retries of function execution.
        cooldown (int): Seconds to wait before retry.
        verbose (bool): Specifies if we should log about not successful
            attempts. (Currently unused by the implementation.)
    """
    def wrap(func):
        @wraps(func)
        async def inner(*args, **kwargs):
            attempt = 0
            while True:
                try:
                    return await func(*args, **kwargs)
                except exceptions as err:
                    attempt += 1
                    if attempt > retries:
                        # Give up: surface the call that kept failing.
                        raise ValueError(
                            func.__qualname__, args, kwargs) from err
                    if cooldown:
                        await asyncio.sleep(cooldown)
        return inner
    return wrap
fea7b786815e2aabedf37e8011485eda3c989fe7
3,647,915
def process_student(filename_or_URL):
    """calls mark_student on one student HTML file

    Creates a BeautifulSoup object and calls mark_student. If the
    filename_or_URL starts with "https://", attempt to get Firefox cookies
    before reading from the URL.

    Parameters:
    ----------
    filename_or_URL: either a local filename, or a URL

    Returns:
    --------
    return-value of mark_student
    """
    if filename_or_URL.startswith("https://"):
        soup = soup_from_URL(filename_or_URL, get_cookie_jar())
    else:
        soup = soup_from_file(filename_or_URL)
    return mark_student(soup)
185d396f4005954fcc26b0c8ab3c9711b511c611
3,647,916
def find_CH2OH_in_chain(atoms, cycles):
    """
    Find terminal CH2OH groups whose carbon C(5) is not part of a ring.

              H
              '
              O(6)
              '
        H     '
         \\   /
        R---C(5)---H

    Returns the list of C(5) atom indices and, per index, the de-duplicated
    list of neighbour atom indices of C(5) and O(6).
    """
    end_carbon_indices = []
    end_carbon_indices_atom_list = {}
    for c5_idx in range(len(atoms)):
        # Candidate C(5): a carbon outside any cycle.
        if atoms[c5_idx].get_atom_name() != 'C' or is_in_a_cycle(cycles, c5_idx):
            continue
        nghs_c5 = atoms[c5_idx].get_ngh()
        nums_c5, nghs_list_c5 = parse_atom_nghs(nghs_c5, ['H', 'C', 'O'])
        # C(5) must carry exactly two hydrogens and one oxygen.
        if not (nums_c5['H'] == 2 and nums_c5['O'] == 1):
            continue
        o6_idx = nghs_list_c5['O'][0]
        nghs_o6 = atoms[o6_idx].get_ngh()
        nums_o6, nghs_list_o6 = parse_atom_nghs(nghs_o6, ['H', 'C', 'O'])
        # O(6) must be a hydroxyl oxygen: exactly one H and one C neighbour.
        if len(nghs_o6) == 2 and nums_o6['H'] == 1 and nums_o6['C'] == 1:
            end_carbon_indices.append(c5_idx)
            group = []
            for ngh in nghs_c5:
                if ngh[0] not in group:
                    group.append(ngh[0])
            for ngh in nghs_o6:
                if ngh[0] not in group:
                    group.append(ngh[0])
            end_carbon_indices_atom_list[c5_idx] = group
    return end_carbon_indices, end_carbon_indices_atom_list
55dbb4767c905fd90f5085b4fcea08e80bf43902
3,647,917
def keep_point(p, frame):
    """
    Decide whether a tracked point should be kept.

    p: TrackedPoint instance
    frame: image (numpy array) -- accepted for interface compatibility,
        not consulted by the current checks
    """
    # Same check order as before: bounds first, then coasting duration,
    # then coasting distance (short-circuits identically).
    return bool(
        p.in_bounds()
        and not p.coasted_too_long()
        and not p.coasted_too_far()
    )
7f51b9f15ac8befe07b463875b9245194aebbef0
3,647,919
from typing import Dict
from typing import List


def sqrt(node: NodeWrapper, params: Dict[str, np.ndarray], xmap: Dict[str, XLayer]) -> List[XLayer]:
    """ONNX Sqrt to XLayer Sqrt conversion function"""
    logger.info("ONNX Sqrt -> XLayer Sqrt")

    outputs = node.get_outputs()
    assert len(outputs) == 1
    name = outputs[0]

    iX = xmap[node.get_inputs()[0]]  # NCHW
    X = px.ops.sqrt(op_name=px.stringify(name), in_xlayers=[iX], onnx_id=name)
    return [X]
711dfe71eaf337c75acd07bf3f09ca8a7c090fa4
3,647,920
def get_testcase_desc(suite, testcase_name):
    """
    Return the description of the testcase with the given name of the given
    testsuite.

    Remove trailing line returns if applicable, they look nasty in the
    reports (text and otherwise)
    """
    desc = getattr(suite, testcase_name).__doc__
    if not desc:
        return ""
    return strings.format_description(desc.rstrip())
1a97e02047d42f76328cc55debe8006bcfb80a43
3,647,921
def slave_freq_one_pc(args):
    """Wrapper to be able to use Pool"""
    result = freq_one_pc(*args)
    return args, result
0627685181cbec45564066ea9e29601fc3717257
3,647,922
def base10_to_base26_alph(base10_no):
    """Convert base-10 integer to base-26 alphabetic system.

    This function provides a utility to write pdb/psf files such that it can
    add many more than 9999 atoms and 999 residues.

    Parameters
    ----------
    base10_no: int
        The integer to convert to base-26 alphabetic system

    Returns
    -------
    str
        The converted base-26 system string

    See Also
    --------
    mbuild.conversion._to_base: Helper function to perform a base-n
        conversion
    """
    converted = _to_base(base10_no, base=26)
    return converted
67aed6602c6813702416310518c892f02fdb58ef
3,647,923
import pickle


def train(model, X, y, name: str):
    """
    Train a model on the given training set and optionally save it to disk.

    :param model: the model to train
    :param X: the sample images, list of numpy arrays (greyscale images)
    :param y: the target labels, list of strings (kanji)
    :param name: name of the model used to save it on disk, or None if it
        is not to be saved
    :return: the trained model
    """
    # Flatten each image into a 1-D feature vector (samples x features).
    samples = np.asarray(X)
    samples = samples.reshape((samples.shape[0], -1))

    print("fitting on {} samples".format(len(y)))
    print("begin fitting")
    model.fit(samples, y)
    print("done fitting")

    # Optionally persist the trained model.
    if name is not None:
        with open("trained_{}.pkl".format(name), 'wb') as f:
            pickle.dump(model, f, pickle.HIGHEST_PROTOCOL)
    return model
9b5e4e03b25d7692a233370dd2db1fd2435365e0
3,647,924
def run_phage_boost(genecalls, model_file, verbose):
    """
    Run phage boost

    :param model_file: The model file that is probably something like
        model_delta_std_hacked.pickled.silent.gz
    :param genecalls: The pandas data frame of gene calls
    :param verbose: more output
    :return: the predicted regions
    """
    # rolling params
    period = 20
    win_type = 'parzen'
    min_periods = 1
    # region finding params
    threshold = 0.9
    length = 10
    gaps = 5
    neighbouring = 0
    alpha = 0.001

    # calculate features from gene calls
    if verbose:
        message("Calculating features", "GREEN")
    df = calculate_features(genecalls)

    # load model, then transform single-gene features into deltas
    model, feats, feats_, limit = read_model_from_file(model_file)
    df = get_predictions.get_deltas(df[feats_])

    if verbose:
        message("Transforming gene predictions to regions", "GREEN")
    # transform single gene predictions to regions
    newgenecalls, nphages, res = predict(model, genecalls, df, feats,
                                         period, win_type, min_periods,
                                         limit, threshold, length, gaps,
                                         neighbouring, alpha)
    return res
28592977483d092cf67e7eb7bbd98b911044084b
3,647,926
from datetime import datetime


def get_wishlist_confirmation_time():
    """Return whether user can confirm his wishlist or not

    No request params.
    """
    try:
        confirmation_time = g.user.get_wishlist_confirmation_time()
        # A user with no prior confirmation may always confirm; otherwise
        # at least one day must have passed since the last confirmation.
        if confirmation_time is None:
            can_confirm = True
        else:
            can_confirm = datetime.now() - confirmation_time > timedelta(days=1)
        return data_response({'can_confirm': can_confirm})
    except AuthorizationError:
        return error_response( 'Neuspješno dohvaćanje vremena zadnjeg potvrđivanja: Nedozvoljena mogućnost.', 403 )
89c2fbe9a3801805194dbf41274ba348a87954b1
3,647,927
def get_bprop_npu_clear_float_status(self):
    """Grad definition for `NPUClearFloatStatus` operation."""
    def bprop(x, out, dout):
        # The op only clears hardware status flags; its gradient w.r.t. x
        # is identically zero.
        return (zeros_like(x),)

    return bprop
8e0733a9d6294e507bb99f3536cf1898137a0f3b
3,647,928
import pathlib


def path_to_filename(path, with_suffix=True):
    """Get filename from path.

    Parameters
    ==========
    path : str
        Path to retrieve file name from e.g. '/path/to/image.png'.
    with_suffix : bool
        Whether to include the suffix of file path in file name.

    Returns
    =======
    str
        The file name of the path e.g. 'image.png' or 'image' if
        `with_suffix` is false.
    """
    p = pathlib.Path(path)
    if not with_suffix:
        # Only the final suffix is dropped ('a.tar.gz' -> 'a.tar').
        p = p.with_suffix("")
    return str(p.name)
45ecfb6e263e65de7165a69eda99bc8de2a157f4
3,647,929
def encode3(Married):
    """
    Encode a married status to 1 ('Yes') or 0 (anything else).
    """
    return 1 if Married == 'Yes' else 0
3be5ca3b773e5ded6fe8ec834bc0d99af68bf9e6
3,647,930
def rle_encoding(img, mask_val=1):
    """
    Turns our masks into RLE encoding to easily store them
    and feed them into models later on
    https://en.wikipedia.org/wiki/Run-length_encoding

    Args:
        img (np.array): Segmentation array
        mask_val (int): Which value to use to create the RLE

    Returns:
        RLE string
    """
    # Column-major (Fortran-order) positions of the mask pixels.
    dots = np.where(img.T.flatten() == mask_val)[0]
    runs = []
    prev = -2
    for pos in dots:
        if pos > prev + 1:
            # Start a new run: 1-based start position, zero length so far.
            runs.extend((pos + 1, 0))
        runs[-1] += 1
        prev = pos
    return ' '.join(str(x) for x in runs)
8639094ea57138212a73b179eed593e248363314
3,647,932
import asyncio


def alt(*ops, priority=False, default=_Undefined):
    """
    alt(*ops, priority=False, default=Undefined)

    Return an awaitable committing exactly one of the given channel ops.

    Each op is either a channel to get from or an iterable
    ``[channel, val]`` to put `val` onto `channel`. Without `default`,
    the first op to finish is committed and the awaitable evaluates to
    ``(val, ch)``. With `default`, if no op completes immediately then
    none is committed and the awaitable evaluates to
    ``(default, 'default')``.

    Args:
        ops: Get or put operations as described above.
        priority: If True, try ops in order; otherwise in random order.
        default: Optional value used when no op finishes immediately.

    Raises:
        ValueError: If `ops` is empty or contains both a get and a put
            on the same channel.
        RuntimeError: If the calling thread has no running event loop.

    See Also:
        :func:`b_alt`
    """
    flag = create_flag()
    future = FlagFuture(flag)
    immediate = _alts(flag, future_deliver_fn(future), ops, priority, default)
    if immediate is not None:
        # An operation (or the default) completed synchronously; deliver
        # the result without waiting on the flag.
        asyncio.Future.set_result(future, immediate)
    return future
e26660938b760e9f3e2b43375c26ee1a2e946056
3,647,933
def make_class_dictable(
    cls,
    exclude=constants.default_exclude,
    exclude_underscore=constants.default_exclude_underscore,
    fromdict_allow_pk=constants.default_fromdict_allow_pk,
    include=None,
    asdict_include=None,
    fromdict_include=None,
):
    """Make a class dictable.

    Useful when the Base class is already defined, for example with
    Flask-SQLAlchemy.

    Warning: existing attributes with the same names are overwritten.

    :param exclude: Set as ``dictalchemy_exclude`` on the class
    :param exclude_underscore: Set as ``dictalchemy_exclude_underscore``
    :param fromdict_allow_pk: Set as ``dictalchemy_fromdict_allow_pk``
    :param include: Set as ``dictalchemy_include``
    :param asdict_include: Set as ``dictalchemy_asdict_include``; when
        not None it overrides ``dictalchemy_include``.
    :param fromdict_include: Set as ``dictalchemy_fromdict_include``;
        when not None it overrides ``dictalchemy_include``.
    :returns: The class
    """
    # Attach configuration plus the asdict/fromdict/iteration protocol.
    attributes = {
        "dictalchemy_exclude": exclude,
        "dictalchemy_exclude_underscore": exclude_underscore,
        "dictalchemy_fromdict_allow_pk": fromdict_allow_pk,
        "asdict": asdict,
        "fromdict": fromdict,
        "__iter__": iter,
        "dictalchemy_include": include,
        "dictalchemy_asdict_include": asdict_include,
        "dictalchemy_fromdict_include": fromdict_include,
    }
    for attr_name, attr_value in attributes.items():
        setattr(cls, attr_name, attr_value)
    return cls
87a0ed0b0baa1449396921c3651c9d2ef4549f35
3,647,934
def async_request_config(
    hass,
    name,
    callback=None,
    description=None,
    description_image=None,
    submit_caption=None,
    fields=None,
    link_name=None,
    link_url=None,
    entity_picture=None,
):
    """Create a new request for configuration.

    Appends an optional markdown link and image to the description, lazily
    creates the singleton Configurator, and registers the request.

    Returns the request id to be used for subsequent calls.
    """
    if link_name is not None and link_url is not None:
        description += f"\n\n[{link_name}]({link_url})"

    if description_image is not None:
        description += f"\n\n![Description image]({description_image})"

    # Create the Configurator singleton on first use.
    instance = hass.data.get(_KEY_INSTANCE)
    if instance is None:
        instance = hass.data[_KEY_INSTANCE] = Configurator(hass)

    request_id = instance.async_request_config(
        name, callback, description, submit_caption, fields, entity_picture
    )

    hass.data.setdefault(DATA_REQUESTS, {})[request_id] = instance

    return request_id
f3c8ee70b3b51debeb404660a35491b07c78170e
3,647,935
def get_blueprint_docs(blueprints, blueprint):
    """Return the docstring of the named blueprint."""
    return blueprints[blueprint].__doc__
8a334a9ddd1ff5fe844821152f4312b2db0e9da5
3,647,936
from typing import Counter


def getColorPalatte(image, num, show_chart=False):
    """
    Return the ``num`` most prevalent colors of an image.

    Arguments:
        image - image to sample colors from (RGB array-like)
        num - number of colors to sample (KMeans clusters)
        show_chart - show a pie-chart visualization of the colors

    Returns:
        list of RGB tuples, ordered by cluster label.
    """
    modified_image = np.array(image)
    modified_image = cv2.resize(
        modified_image, (600, 400), interpolation=cv2.INTER_AREA
    )
    modified_image = modified_image.reshape(-1, 3)

    clf = KMeans(n_clusters=num)
    labels = clf.fit_predict(modified_image)

    counts = Counter(labels)
    # Sort by cluster label so colors and percentages stay aligned.
    counts = dict(sorted(counts.items()))

    center_colors = np.rint(clf.cluster_centers_).astype(int)
    center_colors = [tuple(color) for color in center_colors]

    # BUGFIX: the original reordered ``center_colors`` into
    # ``ordered_colors`` using ``counts.keys()`` and then indexed the
    # *reordered* list with the same label keys again. That is correct
    # only when the labels happen to be exactly 0..num-1; a missing
    # cluster label would raise IndexError. Index the label -> color
    # mapping directly instead.
    rgb_colors = [center_colors[i] for i in counts.keys()]
    hex_colors = [RGB2HEX(center_colors[i]) for i in counts.keys()]

    if show_chart:
        plt.figure(figsize=(10, 6))
        plt.subplot(1, 2, 1)
        plt.imshow(image)
        plt.subplot(1, 2, 2)
        plt.pie(counts.values(), labels=hex_colors, colors=hex_colors)
        plt.show()

    return rgb_colors
9eaa125cefb1b23161479eaf2e2765ebb58bcd9e
3,647,937
import numpy


def run_classifier(data, labels, shuffle=False, nfolds=8, scale=True,
                   clf=None, verbose=False):
    """Run a stratified cross-validated classifier on a single dataset.

    Args:
        data: feature matrix, shape (n_samples, n_features).
        labels: target labels, shape (n_samples,).
        shuffle: if True, shuffle ``labels`` in place (permutation test).
        nfolds: number of stratified CV folds.
        scale: if True, standardize features before fitting.
        clf: classifier to use; defaults to an SVC with the module-level C.
        verbose: if True, print ``clf.best_params_`` after each fold
            (only meaningful for grid-search style classifiers).

    Returns:
        float: accuracy of the out-of-fold predictions over all samples.
    """
    features = data
    if scale:
        features = sklearn.preprocessing.scale(features)
    if shuffle:
        numpy.random.shuffle(labels)
    if not clf:
        clf = sklearn.svm.SVC(C=C)
    # BUGFIX: the fold count was hard-coded to 5, silently ignoring the
    # ``nfolds`` argument.
    skf = sklearn.model_selection.StratifiedKFold(nfolds, shuffle=True)
    pred = numpy.zeros(labels.shape[0])
    for train, test in skf.split(features, labels):
        clf.fit(features[train, :], labels[train])
        pred[test] = clf.predict(features[test, :])
        if verbose:
            print(clf.best_params_)
    acc = sklearn.metrics.accuracy_score(labels, pred)
    return acc
3a479971040131cb05f7441112ad0e951b8374f2
3,647,938
def merge_sort(linked_list):
    """
    Sort a linked list in ascending order via merge sort.

    Recursively splits the list into single-node sublists, then merges
    the sorted sublists back together until one remains.

    Returns a sorted linked list. Takes O(kn log n) time.
    """
    # Base cases: a single node or an empty list is already sorted.
    if linked_list.size() == 1:
        return linked_list
    if linked_list.head is None:
        return linked_list

    left_part, right_part = split(linked_list)
    sorted_left = merge_sort(left_part)
    sorted_right = merge_sort(right_part)
    return merge(sorted_left, sorted_right)
07dfee0cb5bdcddb688431f00aeb0520f1d2ed1c
3,647,939
def is_binary(file_path):
    """Return True if the file at ``file_path`` looks binary.

    Inspects the first 1 KiB and reports True when it contains a NUL
    byte; an empty file is treated as text.
    """
    with open(file_path, 'rb') as handle:
        head = handle.read(1024)
    if not head:
        return False
    return b'\0' in head
2df56f93d4e31220a580bf1e659c3c51b96260d2
3,647,940
def convert_host_names_to_ids(session, instanceList):
    """Look up the AWS ID of each named instance.

    Instances that cannot be resolved are skipped.

    Returns a list of instance IDs.
    """
    lookups = (aws.instanceid_lookup(session, name) for name in instanceList)
    return [inst_id for inst_id in lookups if inst_id is not None]
128d3d4a5e5e0729b477687f665abac43d29aef9
3,647,941
def handle_over_max_file_size(error):
    """Error handler for oversized uploads.

    Args:
        error: the raised werkzeug.exceptions.RequestEntityTooLarge.

    Returns:
        str: a fixed result message for the client.
    """
    # BUGFIX: concatenating the exception object to a str raised a
    # TypeError inside the error handler; convert it explicitly first.
    print("werkzeug.exceptions.RequestEntityTooLarge" + str(error))
    return 'result : file size is overed.'
2bbdc1e38dea46ac08c314b3962ed63063578021
3,647,942
from typing import Mapping
import logging
import urllib


def _load_from_url(url: str, chinese_only=False) -> Mapping[str, DictionaryEntry]:
    """Read the dictionary from a remote URL and parse it."""
    logging.info('Opening the dictionary remotely')
    with urllib.request.urlopen(url) as remote:
        text = remote.read().decode('utf-8')
    return _load_dictionary(text.splitlines(), chinese_only)
b496db0b767c17476ecbdc7cab89b962f19a4510
3,647,943
def get_images():
    """ Canned response for glance images list call """
    # Returns the module-level ``images`` object unchanged -- presumably
    # a pre-built list of fake Glance images defined elsewhere in this
    # test module; confirm against the defining module.
    return images
3f26e3e0527c0885cfff3470e5d40baf19b3ca82
3,647,944
def firstUniqChar(self, s):
    """
    Return the index of the first non-repeating lowercase letter in s.

    :type s: str
    :rtype: int  (-1 when no lowercase letter occurs exactly once)
    """
    # For every letter occurring exactly once, record its first position;
    # the answer is the smallest such position.
    candidates = [
        s.index(ch)
        for ch in 'abcdefghijklmnopqrstuvwxyz'
        if s.count(ch) == 1
    ]
    if not candidates:
        return -1
    return min(candidates)
8b42b281c9e80cf89fb9952a0fe7c60c5270c210
3,647,945
def get_form_class_for_class(klass):
    """
    Build a ModelForm class for ``klass`` on the fly.

    Intended for models (usually part of an inheritance hierarchy) that
    define a ``get_editable_fields`` method returning the iterable of
    field names to expose in the form.
    """
    meta_attrs = {'model': klass}
    if hasattr(klass, 'get_editable_fields'):
        meta_attrs['fields'] = klass.get_editable_fields()
    meta_class = type('Meta', (), meta_attrs)
    return type('modelform', (forms.ModelForm,), {'Meta': meta_class})
12fcdcf9a3155e718bab28b30b466824ad425508
3,647,946
def dict_remove_key(d, key, default=None):
    """
    Remove ``key`` from ``d`` __WITH__ side effects.

    Returns the value stored under ``key`` (or ``default`` when absent).
    The original dict is modified in place.
    """
    removed = d.pop(key, default)
    return removed
47bd0edf2bbeb9bad5c696d289c69d2d9eba6a1b
3,647,947
from typing import Optional


def momentum(snap: Snap, mask: Optional[ndarray] = None) -> ndarray:
    """Calculate the total momentum vector on a snapshot.

    Parameters
    ----------
    snap
        The Snap object.
    mask
        Optional mask over the particle arrays. Default is None.

    Returns
    -------
    ndarray
        The total momentum as a vector (px, py, pz).
    """
    mass: ndarray = snap['mass']
    vel: ndarray = snap['velocity']
    per_particle = mass * vel
    if mask is not None:
        per_particle = per_particle[mask]
    return per_particle.sum(axis=0)
022f58ed494fb381e650ec0f61ed8d75704b846c
3,647,948
import types


def limit_epochs(tensor, num_epochs=None, name=None):
    """Returns tensor num_epochs times and then raises an OutOfRange error.

    Args:
        tensor: Any Tensor.
        num_epochs: An integer (optional). If specified, limits the number
            of steps the output tensor may be evaluated.
        name: A name for the operations (optional).

    Returns:
        tensor or OutOfRange.
    """
    # NOTE(review): ``types`` here is presumably the TF framework types
    # module (``types.int64``), not the stdlib -- confirm the import.
    if num_epochs is None:
        return tensor
    if num_epochs <= 0:
        raise ValueError("num_epochs must be > 0 not %d." % num_epochs)
    with ops.op_scope([tensor], name, "limit_epochs") as name:
        # A graph counter that raises OutOfRange once it reaches the cap.
        initial = constant_op.constant(0, dtype=types.int64)
        epoch_var = variables.Variable(initial, name="epochs")
        bump = epoch_var.count_up_to(num_epochs)
        with ops.control_dependencies([bump]):
            return array_ops.identity(tensor, name=name)
82fa475bf4fe0f63a66c5718dc2a0336b887b3d6
3,647,949
def hex_machine(emit):
    """
    Build a state machine parsing a 4-digit hex escape inside a string.

    Args:
        emit (callable): callback invoked with the parsed value (number)

    Returns:
        callable: state function consuming one byte per call
    """
    remaining = 4
    value = 0

    def _hex(byte_data):
        nonlocal value, remaining
        if 0x30 <= byte_data <= 0x39:        # '0'-'9'
            digit = byte_data - 0x30
        elif 0x61 <= byte_data <= 0x66:      # 'a'-'f'
            digit = byte_data - 0x57
        elif 0x41 <= byte_data <= 0x46:      # 'A'-'F'
            digit = byte_data - 0x37
        else:
            raise Exception(
                "Invalid hex char in string hex escape: " + hex(byte_data))
        remaining -= 1
        # Accumulate most-significant nibble first.
        value |= digit << (remaining * 4)
        if remaining:
            return _hex
        return emit(value)

    return _hex
39232fdaf3c0ae19154e28307fb7f1254133dc94
3,647,950
import re
def isbns(self, key, value):
    """Translate MARC ISBN fields into ``identifiers`` entries.

    Builds one ISBN identifier dict per repeated field, extracting an
    optional volume marker and a material type from subfield ``u``.
    Raises IgnoreKey when no usable ISBN value is present, and
    ManualImportRequired when conflicting volume info is found.
    """
    # Start from any identifiers already collected on the record.
    _isbns = self.get("identifiers", [])
    for v in force_list(value):
        subfield_u = clean_val("u", v, str)
        # Subfield "a" holds the valid ISBN; "z" a cancelled/invalid one.
        isbn = {
            "value": clean_val("a", v, str) or clean_val("z", v, str),
            "scheme": "ISBN",
        }
        if not isbn["value"]:
            # No usable ISBN in this field: drop the whole key.
            raise IgnoreKey("identifiers")
        if subfield_u:
            # Subfield "u" may embed a volume marker, e.g. "(v. 2)".
            volume = re.search(r"(\(*v[.| ]*\d+.*\)*)", subfield_u)
            if volume:
                volume = volume.group(1)
                subfield_u = subfield_u.replace(volume, "").strip()
                existing_volume = self.get("volume")
                if existing_volume:
                    # Conflicting volume info needs a human decision.
                    raise ManualImportRequired(subfield="u")
                self["volume"] = volume
            # WARNING! vocabulary document_identifiers_materials
            material = mapping(
                IDENTIFIERS_MEDIUM_TYPES, subfield_u, field=key, subfield="u"
            )
            if material:
                isbn.update({"material": material})
        # Deduplicate identical ISBN entries.
        if isbn not in _isbns:
            _isbns.append(isbn)
    return _isbns
6db2f27733155e33e64b2d2ffba621deda86808d
3,647,951
import requests


def create_user(name, age, occupation):
    """
    Post a new user to the API server.

    Parameters
    ----------
    name : str
        Name of the user.
    age : int
        Age of the user.
    occupation : str
        Occupation of the user.

    Returns
    -------
    message : str
    request_status : int
        HTTP response status code.
        `400` "User already exists"
        `201` "Created User `name`"

    Examples
    --------
    >>> create_user(name = "micha", age= 28, occupation = 'PhD Student')
    "Created User micha", 201
    """
    payload = dict(name=name, age=age, occupation=occupation)

    # Post the user as a shortcut to the /user/<name> endpoint.
    resp = requests.post("{}/user/{}".format(server, name), json=payload)

    if resp.status_code == 201:
        return "Created User {}".format(name), resp.status_code
    if resp.status_code == 400:
        return "User already exists", resp.status_code
    raise ApiError("Some unexpected ERROR code: {}".format(resp.status_code))
7e7a9a1071fd28a10beeaaf3922eaf36533334f8
3,647,952
import torch


def gauss_dataset(dim, size=1e6):
    """
    Create a dataset of randomly sampled Gaussian noise.

    The returned dataset produces batches of ``dim``-sized
    standard-normal vectors.
    """
    def draw_batch(batch_size):
        return torch.randn(batch_size, dim)

    return SampleDataset(draw_batch, size=size)
224640cff465b7e73d091a799498f3282d309b4e
3,647,953
def nightwatch_environment(request):  # convenience spelling
    """Run tests against this environment (staging, production, etc.)"""
    # Pytest-fixture-style accessor: reads the --nightwatch-environment
    # command-line option from the test session's config object.
    return request.config.getoption('--nightwatch-environment')
dc284660e062abf1b74a327e4b045cf79a64ee3a
3,647,954
def get_hrs(pid_arg):
    """
    Pull all recorded heart rate data for a patient from the database.

    Args:
        pid_arg: patient_id to pull heart rate data for

    Returns:
        list: all recorded heart rates for the patient
    """
    patient = User.objects.raw({"_id": pid_arg}).first()
    return patient.heart_rate
48794e2b94359a81d05d435feb0cf39e52142ca1
3,647,955
def resolve(match, *objects):
    """Resolve a regex match's first group against a list of objects.

    Returns ``str(obj[match.group(1)])`` for the first object containing
    the key; if no object contains it, returns the full matched string.

    Example: with regex \\\\.([a-z]) and string test\\.abc, the match is
    {group0: \\.abc, group1: abc}. Assuming one object:
    - obj = {abc: def} -> returns 'def'
    - obj = {test: value} -> returns \\.abc

    Args:
        objects (array[dict]): objects used to look up match.group(1)
        match: the regex match object

    Returns:
        str: the looked-up value, or the fully matched string.
    """
    lookup_key = match.group(1)
    for candidate in objects:
        if candidate is not None and lookup_key in candidate:
            return str(candidate[lookup_key])
    return match.group(0)
52f59fb5248ba635866fcd59a549067c3984e460
3,647,956
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import TestHelper as helper
import azlmbr.legacy.general as general
import azlmbr.bus
import azlmbr


def Collider_CollisionGroupsWorkflow():
    # type: () -> None
    """
    Summary:
    Runs an automated test to ensure PhysX collision groups dictate whether collisions happen or not.
    The test has two phases (A and B) for testing collision groups under different circumstances.
    Phase A is run first and upon success Phase B starts.

    Level Description:
    Entities can be divided into 2 groups for the two phases, A and B. Each phase has identical
    entities with exception to Terrain, where Terrain_A has a collision group/layer set for
    demo_group1/demo1 and Terrain_B has a collision group/layer set for demo_group2/demo2.
    Each Phase has two boxes, Box_1 and Box_2, where each box has its collision group/layer set to
    its number (1 or 2). Each box is positioned just above the Terrain with gravity enabled.
    All entities for Phase B are deactivated by default. If Phase A is set up and executed
    successfully its entities are deactivated and Phase B's entities are activated and validated
    before running the Phase B test.

    Expected behavior:
    When Phase A starts, its two boxes should fall toward the terrain. Once the boxes' behavior is
    validated the entities from Phase A are deactivated and Phase B's entities are activated. Like
    in Phase A, the boxes in Phase B should fall towards the terrain. If all goes as expected
    Box_1_A and Box_2_B should collide with the terrain, and Box_2_A and Box_1_B should fall
    through the terrain.

    Test Steps:
    0) [Define helper classes and functions]
    1) Load the level
    2) Enter game mode
    3) Retrieve and validate entities
    4) Phase A
        a) set up
        b) execute test
        c) log results (deactivate Phase A entities)
    5) Phase B
        a) set up (activate Phase B entities)
        b) execute test
        c) log results
    6) close editor

    Note:
    - This test file must be called from the Open 3D Engine Editor command terminal
    - Any passed and failed tests are written to the Editor.log file. Parsing the file or running a
      log_monitor are required to observe the test results.
    - The level for this test uses two PhysX Terrains and must be run with cmdline argument
      "-autotest_mode" to suppress the warning for having multiple terrains.

    :return: None
    """
    # NOTE(review): the ``Tests`` object (tuples of pass/fail messages)
    # is defined elsewhere in this module -- confirm it stays in sync
    # with the entity names used below.

    # ******* Helper Classes ********

    # Phase A's test results
    class PhaseATestData:
        total_results = 2
        box_1_collided = False
        box_1_fell_through = True
        box_2_collided = False
        box_2_fell_through = False
        box_1 = None
        box_2 = None
        terrain = None
        box_1_pos = None
        box_2_pos = None
        terrain_pos = None

        @staticmethod
        # Quick check for validating results for Phase A
        def valid():
            return (
                PhaseATestData.box_1_collided
                and PhaseATestData.box_2_fell_through
                and not PhaseATestData.box_1_fell_through
                and not PhaseATestData.box_2_collided
            )

    # Phase B's test results
    class PhaseBTestData:
        total_results = 2
        box_1_collided = False
        box_1_fell_through = False
        box_2_collided = False
        box_2_fell_through = True
        box_1 = None
        box_2 = None
        terrain = None
        box_1_pos = None
        box_2_pos = None
        terrain_pos = None

        @staticmethod
        # Quick check for validating results for Phase B
        def valid():
            return (
                not PhaseBTestData.box_1_collided
                and not PhaseBTestData.box_2_fell_through
                and PhaseBTestData.box_1_fell_through
                and PhaseBTestData.box_2_collided
            )

    # **** Helper Functions ****

    # ** Validation helpers **

    # Attempts to validate an entity based on the name parameter
    def validate_entity(entity_name, msg_tuple):
        # type: (str, (str, str)) -> EntityId
        entity_id = general.find_game_entity(entity_name)
        Report.critical_result(msg_tuple, entity_id.IsValid())
        return entity_id

    # Attempts to retrieve an entity's initial position and logs result
    def validate_initial_position(entity_id, msg_tuple):
        # type: (EntityId, (str, str)) -> azlmbr.math.Vector3
        # Attempts to validate and return the entity's initial position.
        # logs the result to Report.result() using the tuple parameter
        pos = azlmbr.components.TransformBus(azlmbr.bus.Event, "GetWorldTranslation", entity_id)
        # A zero position is treated as "not placed" and fails validation.
        valid = not (pos is None or pos.IsZero())
        entity_name = azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "GetEntityName", entity_id)
        Report.critical_result(msg_tuple, valid)
        Report.info_vector3(pos, "{} initial position:".format(entity_name))
        return pos

    # ** Phase completion checks checks **

    # Checks if we are done collecting data for phase A
    def done_collecting_results_a():
        # type: () -> bool
        # Update positions
        PhaseATestData.box_1_pos = azlmbr.components.TransformBus(
            azlmbr.bus.Event, "GetWorldTranslation", PhaseATestData.box_1
        )
        PhaseATestData.box_2_pos = azlmbr.components.TransformBus(
            azlmbr.bus.Event, "GetWorldTranslation", PhaseATestData.box_2
        )

        # Check for boxes to fall through terrain
        if PhaseATestData.box_1_pos.z < PhaseATestData.terrain_pos.z:
            PhaseATestData.box_1_fell_through = True
        else:
            PhaseATestData.box_1_fell_through = False

        if PhaseATestData.box_2_pos.z < PhaseATestData.terrain_pos.z:
            PhaseATestData.box_2_fell_through = True
        else:
            PhaseATestData.box_2_fell_through = False

        # Each box contributes one result once it has either collided or
        # fallen through the terrain.
        results = 0
        if PhaseATestData.box_1_collided or PhaseATestData.box_1_fell_through:
            results += 1
        if PhaseATestData.box_2_collided or PhaseATestData.box_2_fell_through:
            results += 1
        return results == PhaseATestData.total_results

    # Checks if we are done collecting data for phase B
    def done_collecting_results_b():
        # type: () -> bool
        # Update positions
        PhaseBTestData.box_1_pos = azlmbr.components.TransformBus(
            azlmbr.bus.Event, "GetWorldTranslation", PhaseBTestData.box_1
        )
        PhaseBTestData.box_2_pos = azlmbr.components.TransformBus(
            azlmbr.bus.Event, "GetWorldTranslation", PhaseBTestData.box_2
        )

        # Check for boxes to fall through terrain
        if PhaseBTestData.box_1_pos.z < PhaseBTestData.terrain_pos.z:
            PhaseBTestData.box_1_fell_through = True
        else:
            PhaseBTestData.box_1_fell_through = False

        if PhaseBTestData.box_2_pos.z < PhaseBTestData.terrain_pos.z:
            PhaseBTestData.box_2_fell_through = True
        else:
            PhaseBTestData.box_2_fell_through = False

        results = 0
        if PhaseBTestData.box_1_collided or PhaseBTestData.box_1_fell_through:
            results += 1
        if PhaseBTestData.box_2_collided or PhaseBTestData.box_2_fell_through:
            results += 1
        return results == PhaseBTestData.total_results

    # **** Event Handlers ****

    # Collision even handler for Phase A
    def on_collision_begin_a(args):
        # type: ([EntityId]) -> None
        collider_id = args[0]
        if (not PhaseATestData.box_1_collided) and PhaseATestData.box_1.Equal(collider_id):
            Report.info("Box_1_A / Terrain_A collision detected")
            PhaseATestData.box_1_collided = True
        if (not PhaseATestData.box_2_collided) and PhaseATestData.box_2.Equal(collider_id):
            Report.info("Box_2_A / Terrain_A collision detected")
            PhaseATestData.box_2_collided = True

    # Collision event handler for Phase B
    def on_collision_begin_b(args):
        # type: ([EntityId]) -> None
        collider_id = args[0]
        if (not PhaseBTestData.box_1_collided) and PhaseBTestData.box_1.Equal(collider_id):
            Report.info("Box_1_B / Terrain_B collision detected")
            PhaseBTestData.box_1_collided = True
        if (not PhaseBTestData.box_2_collided) and PhaseBTestData.box_2.Equal(collider_id):
            Report.info("Box_2_B / Terrain_B collision detected")
            PhaseBTestData.box_2_collided = True

    # Per-phase timeout in seconds for wait_for_condition below.
    TIME_OUT = 1.5

    # 1) Open level
    helper.init_idle()
    helper.open_level("Physics", "Collider_CollisionGroupsWorkflow")

    # 2) Enter game mode
    helper.enter_game_mode(Tests.enter_game_mode)

    # 3) Retrieve and validate entities
    PhaseATestData.box_1 = validate_entity("Box_1_A", Tests.box_1_a_valid)
    PhaseATestData.box_2 = validate_entity("Box_2_A", Tests.box_2_a_valid)
    PhaseATestData.terrain = validate_entity("Terrain_Entity_A", Tests.terrain_a_valid)
    PhaseBTestData.box_1 = validate_entity("Box_1_B", Tests.box_1_b_valid)
    PhaseBTestData.box_2 = validate_entity("Box_2_B", Tests.box_2_b_valid)
    PhaseBTestData.terrain = validate_entity("Terrain_Entity_B", Tests.terrain_b_valid)

    # Make sure Phase B objects are disabled
    azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseBTestData.box_1)
    azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseBTestData.box_2)
    azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseBTestData.terrain)

    # 4) *********** Phase A *****************
    # 4.a) ** Set Up **
    Report.info(" **** Beginning Phase A **** ")
    # Locate Phase A entities
    PhaseATestData.box_1_pos = validate_initial_position(PhaseATestData.box_1, Tests.box_1_a_pos_found)
    PhaseATestData.box_2_pos = validate_initial_position(PhaseATestData.box_2, Tests.box_2_a_pos_found)
    PhaseATestData.terrain_pos = validate_initial_position(PhaseATestData.terrain, Tests.terrain_a_pos_found)

    # Assign Phase A event handler
    handler_a = azlmbr.physics.CollisionNotificationBusHandler()
    handler_a.connect(PhaseATestData.terrain)
    handler_a.add_callback("OnCollisionBegin", on_collision_begin_a)

    # 4.b) Execute Phase A
    if not helper.wait_for_condition(done_collecting_results_a, TIME_OUT):
        Report.info("Phase A timed out: make sure the level is set up properly or adjust time out threshold")

    # 4.c) Log results for Phase A
    Report.result(Tests.box_1_a_did_collide_with_terrain, PhaseATestData.box_1_collided)
    Report.result(Tests.box_1_a_did_not_pass_through_terrain, not PhaseATestData.box_1_fell_through)
    Report.info_vector3(PhaseATestData.box_1_pos, "Box_1_A's final position:")
    Report.result(Tests.box_2_a_did_pass_through_terrain, PhaseATestData.box_2_fell_through)
    Report.result(Tests.box_2_a_did_not_collide_with_terrain, not PhaseATestData.box_2_collided)
    Report.info_vector3(PhaseATestData.box_2_pos, "Box_2_A's final position:")

    if not PhaseATestData.valid():
        Report.info("Phase A failed test")

    # Deactivate entities for Phase A
    azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseATestData.box_1)
    azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseATestData.box_2)
    azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "DeactivateGameEntity", PhaseATestData.terrain)

    # 5) *********** Phase B *****************
    # 5.a) ** Set Up **
    Report.info(" *** Beginning Phase B *** ")
    # Activate entities for Phase B
    azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "ActivateGameEntity", PhaseBTestData.box_1)
    azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "ActivateGameEntity", PhaseBTestData.box_2)
    azlmbr.entity.GameEntityContextRequestBus(azlmbr.bus.Broadcast, "ActivateGameEntity", PhaseBTestData.terrain)

    # Initialize positions for Phase B
    PhaseBTestData.box_1_pos = validate_initial_position(PhaseBTestData.box_1, Tests.box_1_b_pos_found)
    PhaseBTestData.box_2_pos = validate_initial_position(PhaseBTestData.box_2, Tests.box_2_b_pos_found)
    PhaseBTestData.terrain_pos = validate_initial_position(PhaseBTestData.terrain, Tests.terrain_b_pos_found)

    # Assign Phase B event handler
    handler_b = azlmbr.physics.CollisionNotificationBusHandler()
    handler_b.connect(PhaseBTestData.terrain)
    handler_b.add_callback("OnCollisionBegin", on_collision_begin_b)

    # 5.b) Execute Phase B
    if not helper.wait_for_condition(done_collecting_results_b, TIME_OUT):
        Report.info("Phase B timed out: make sure the level is set up properly or adjust time out threshold")

    # 5.c) Log results for Phase B
    Report.result(Tests.box_1_b_did_not_collide_with_terrain, not PhaseBTestData.box_1_collided)
    Report.result(Tests.box_1_b_did_pass_through_terrain, PhaseBTestData.box_1_fell_through)
    Report.info_vector3(PhaseBTestData.box_1_pos, "Box_1_B's final position:")
    Report.result(Tests.box_2_b_did_not_pass_through_terrain, not PhaseBTestData.box_2_fell_through)
    Report.result(Tests.box_2_b_did_collide_with_terrain, PhaseBTestData.box_2_collided)
    Report.info_vector3(PhaseBTestData.box_2_pos, "Box_2_B's final position:")

    if not PhaseBTestData.valid():
        Report.info("Phase B failed test")

    # 6) Exit Game mode
    helper.exit_game_mode(Tests.exit_game_mode)

    Report.info(" **** TEST FINISHED ****")
6463d4543a771a50709712012650c804b365fe81
3,647,957
import time
from datetime import datetime


def _timestamp(zone="Europe/Istanbul") -> int:
    """Return the current Unix timestamp (seconds since the epoch).

    Args:
        zone: tz database name, kept for backward compatibility. A Unix
            timestamp is zone-independent, so the zone does not skew the
            result.

    Returns:
        int: current epoch seconds.
    """
    # BUGFIX: the previous implementation passed an aware datetime's
    # timetuple to ``time.mktime``, which interprets its input as *local*
    # wall time -- the result was offset by the difference between the
    # host timezone and ``zone``. ``datetime.timestamp()`` is correct
    # regardless of zone.
    return int(datetime.now(timezone(zone)).timestamp())
871c1dcba8b6f581097c2e24d34903c00034fa03
3,647,958
def sumReplacements(tex, functionName):
    """
    Replace the "\\apisummary{...}" block in a tex file with a manpage
    NAME section.

    Finds the keyString "\\apisummary{" and its matching closing brace,
    normalizes the enclosed text (no consecutive spaces, tabs, or
    unnecessary newlines) via cleanText, and splices it back as a
    ".SH NAME" section. The markers "./ sectionStart" and
    "./ sectionEnd" bracket the section so it can be distinguished from
    "dangling text"; lines beginning with a period are treated as
    comments and will not appear in the manpage.
    """
    start = tex.find("\\apisummary{")
    end = findMatchingBrace(tex, tex.find("{", start))
    summary = cleanText(tex[start:end])

    section = (
        "./ sectionStart\n"
        + ".SH NAME\n"
        + functionName + " \- " + summary + "\n"
        + "./ sectionEnd\n"
    )
    tex = tex[:start] + section + tex[end + 1:]
    return tex.replace("\\apisummary{", "")
8b2ed7bec78c6f2fa03c1308cc4a8fcdfbfa6f8d
3,647,959
from typing import Union, List, Optional


def path(path: Union[str, List[str]], *, disable_stage_removal: Optional[bool] = False):
    """Validate the path in the event against the given path(s).

    The following APIErrorResponse subclasses are used:
        PathNotFoundError: When the path doesn't match.

    Args:
        path: A path literal or list of path literals to validate against.
        disable_stage_removal (bool): preserve the original path with stage.
    """
    decorator_kwargs = {
        "path": path,
        "disable_stage_removal": disable_stage_removal,
    }
    return _get_decorator(validate_path, **decorator_kwargs)
eff9b153e90e3d657733c5c83b13c77aef21395f
3,647,961
import math
import torch


def inference_fn(trained_model,
                 remove,
                 fixed_params,
                 overwrite_fixed_params=False,
                 days_of_purchases=710,
                 days_of_clicks=710,
                 lifespan_of_items=710,
                 **params):
    """
    Function to run inference inside the hyperparameter loop and calculate metrics.

    Parameters
    ----------
    trained_model:
        Model trained during training of hyperparameter loop.
    remove:
        Percentage of data removed. See src.utils_data for more details.
    fixed_params:
        All parameters used during training of hyperparameter loop. See
        src.utils_data for more details.
    overwrite_fixed_params:
        If true, training parameters will overwritten by the parameters below.
        Can be useful if need to test the model on different parameters, e.g.
        that includes older clicks or purchases.
    days_of_purchases, days_of_clicks, lifespan_of_items:
        All parameters that can overwrite the training parameters. Only useful
        if overwrite_fixed_params is True.
    params:
        All other parameters used during training.

    Returns
    -------
    recall:
        Recall on the test set. Relevant to compare with recall computed on
        hyperparametrization test set (since parameters like 'remove' and all
        overwritable parameters are different)

    Saves to file
    -------------
    Metrics computed on the test set.
    """
    # NOTE(review): num_workers, cuda and device are not defined in this
    # function -- presumably module-level globals set at import time; confirm.

    # Import parameters: both fixed_params and params may be given as paths
    # to serialized files instead of in-memory objects.
    if isinstance(fixed_params, str):
        path = fixed_params
        fixed_params = read_data(path)

        # Lightweight wrapper so dict keys can be read as attributes below.
        class objectview(object):
            def __init__(self, d):
                self.__dict__ = d
        fixed_params = objectview(fixed_params)

    if 'params' in params.keys():
        # if isinstance(params['params'], str):
        path = params['params']
        params = read_data(path)

    # Initialize data
    data_paths = DataPaths()
    fixed_params.remove = remove
    if overwrite_fixed_params:
        fixed_params.days_of_purchases = days_of_purchases
        fixed_params.days_of_clicks = days_of_clicks
        fixed_params.lifespan_of_items = lifespan_of_items
    data = DataLoader(data_paths, fixed_params)

    # Get graph
    valid_graph = create_graph(
        data.graph_schema,
    )
    valid_graph = assign_graph_features(valid_graph,
                                        fixed_params,
                                        data,
                                        **params,
                                        )

    # Layer dimensions derived from the node feature matrices.
    dim_dict = {'user': valid_graph.nodes['user'].data['features'].shape[1],
                'item': valid_graph.nodes['item'].data['features'].shape[1],
                'out': params['out_dim'],
                'hidden': params['hidden_dim']}

    all_sids = None
    if 'sport' in valid_graph.ntypes:
        dim_dict['sport'] = valid_graph.nodes['sport'].data['features'].shape[1]
        all_sids = np.arange(valid_graph.num_nodes('sport'))

    # get training and test ids
    (
        train_graph,
        train_eids_dict,
        valid_eids_dict,
        subtrain_uids,
        valid_uids,
        test_uids,
        all_iids,
        ground_truth_subtrain,
        ground_truth_valid,
        all_eids_dict
    ) = train_valid_split(
        valid_graph,
        data.ground_truth_test,
        fixed_params.etype,
        fixed_params.subtrain_size,
        fixed_params.valid_size,
        fixed_params.reverse_etype,
        fixed_params.train_on_clicks,
        fixed_params.remove_train_eids,
        params['clicks_sample'],
        params['purchases_sample'],
    )
    (
        edgeloader_train,
        edgeloader_valid,
        nodeloader_subtrain,
        nodeloader_valid,
        nodeloader_test
    ) = generate_dataloaders(valid_graph,
                             train_graph,
                             train_eids_dict,
                             valid_eids_dict,
                             subtrain_uids,
                             valid_uids,
                             test_uids,
                             all_iids,
                             fixed_params,
                             num_workers,
                             all_sids,
                             embedding_layer=params['embedding_layer'],
                             n_layers=params['n_layers'],
                             neg_sample_size=params['neg_sample_size'],
                             )

    # One embedding pass covers every test user plus every item node.
    num_batches_test = math.ceil((len(test_uids) + len(all_iids)) / fixed_params.node_batch_size)

    # Import model: trained_model may be a checkpoint path instead of an object.
    if isinstance(trained_model, str):
        path = trained_model
        trained_model = ConvModel(valid_graph,
                                  params['n_layers'],
                                  dim_dict,
                                  params['norm'],
                                  params['dropout'],
                                  params['aggregator_type'],
                                  fixed_params.pred,
                                  params['aggregator_hetero'],
                                  params['embedding_layer'],
                                  )
        trained_model.load_state_dict(torch.load(path, map_location=device))
    if cuda:
        trained_model = trained_model.to(device)

    trained_model.eval()
    with torch.no_grad():
        embeddings = get_embeddings(valid_graph,
                                    params['out_dim'],
                                    trained_model,
                                    nodeloader_test,
                                    num_batches_test,
                                    cuda,
                                    device,
                                    params['embedding_layer'],
                                    )

        # Metrics are reported twice: against purchases only, then against
        # the full test ground truth; only the last recall is returned.
        for ground_truth in [data.ground_truth_purchase_test, data.ground_truth_test]:
            precision, recall, coverage = get_metrics_at_k(
                embeddings,
                valid_graph,
                trained_model,
                params['out_dim'],
                ground_truth,
                all_eids_dict[('user', 'buys', 'item')],
                fixed_params.k,
                True,  # Remove already bought
                cuda,
                device,
                fixed_params.pred,
                params['use_popularity'],
                params['weight_popularity'],
            )

            sentence = ("TEST Precision "
                        "{:.3f}% | Recall {:.3f}% | Coverage {:.2f}%"
                        .format(precision * 100,
                                recall * 100,
                                coverage * 100))
            print(sentence)
            save_txt(sentence, data_paths.result_filepath, mode='a')

    return recall
3fa306d97d4db7cf5b321b6284c5ab75ff108845
3,647,963
import random
import scipy


def initialize_mean_variance(args):
    """Initialize the current mean and variance values semi-intelligently.

    Inspired by the kmeans++ algorithm: iteratively choose new centers from
    the data by weighted sampling, favoring points that are distant from
    those already chosen.

    :param args: namespace with ``X`` (3-D data array, reshaped here to
        (n_points, n_features)), ``K`` (number of clusters) and
        ``pseudocount`` (added to variances to avoid zeros).
    :return: (means, variances) arrays, one row per cluster.
    """
    X = args.X.reshape(args.X.shape[0] * args.X.shape[1], args.X.shape[2])

    # kmeans++ inspired choice: start from a random point, then sample each
    # new center with probability proportional to the squared distance from
    # the closest already-chosen center.
    centers = [random.choice(X)]
    min_dists = scipy.array([distance(centers[-1], x) for x in X])
    for _ in range(1, args.K):
        weights = min_dists * min_dists
        # BUGFIX: `weighted_sample(...).next()` is Python 2 only; use the
        # built-in next() to pull one sample from the generator.
        new_center = next(weighted_sample(zip(weights, X), 1))
        centers.append(new_center)
        # element-wise minimum against distances to the newest center
        min_dists = scipy.fmin(min_dists,
                               scipy.array([distance(centers[-1], x) for x in X]))
    means = scipy.array(centers)

    # for the variance, get the variance of the data in this cluster
    # (points whose nearest-center distance equals their distance to c)
    variances = []
    for c in centers:
        idxs = tuple(i for i, (x, m) in enumerate(zip(X, min_dists))
                     if distance(c, x) == m)
        v = scipy.var(X[idxs, :], axis=0)
        variances.append(v)
    variances = scipy.array(variances) + args.pseudocount
    # floor tiny variances to keep downstream computations stable
    variances[variances < .1] = .1
    return means, variances
98808bc7ab069c1ea7ca8e05b6dd27275d6c0f09
3,647,964
def verify_file_checksum(path, expected_checksum):
    """Return True when the sha256 checksum of *path* equals *expected_checksum*."""
    return calculate_file_checksum(path) == expected_checksum
519d58892a122d5bc7850cb21ca047c152ef4183
3,647,965
import decimal


def float_to_str(f, p=20):
    """Convert a float to its plain decimal string, never using scientific notation.

    :param f: the number to convert (a float, or a string parseable as one)
    :param p: precision (number of significant digits) for the conversion
    :return: the non-exponent decimal string representation of ``f``
    """
    # isinstance instead of `type(f) == str` also accepts str subclasses.
    if isinstance(f, str):
        f = float(f)
    ctx = decimal.Context(prec=p)
    # repr() keeps the shortest representation that round-trips the float;
    # 'f' formatting of the Decimal then expands any exponent notation.
    d1 = ctx.create_decimal(repr(f))
    return format(d1, 'f')
551ab2f58b48e4005d8b5a85a7eb096e4e749d23
3,647,966
from typing import List


def get_classes(parsed) -> List[ClassDef]:
    """Return the class definitions found at the top level of the parsed module."""
    classes = []
    for node in parsed.body:
        if isinstance(node, ClassDef):
            classes.append(node)
    return classes
e339899eb1dd039c9a708bf39f2fafa527d15882
3,647,967
# BUGFIX: was `import tqdm`, but the loop below calls `tqdm(...)` directly;
# calling the module object raises TypeError -- import the tqdm class instead.
from tqdm import tqdm


def create_index(
    corpus_f: str,
    model_name_or_path: str,
    output_f: str,
    mode: str = "sent2vec",
    batch_size: int = 64,
    use_cuda: bool = False,
):
    """Given a corpus file `corpus_f` and a sent2vec model `sent2vec_f`, convert the sentences in the
    corpus (line-by-line) to vector representations, normalise them (L2norm), and add them to a Flat
    FAISS index. Finally, save the index to `output_f`.

    :param corpus_f: path to the corpus file, with one sentence per line
    :param model_name_or_path: path to the binary sent2vec model (when mode=="sent2vec") or model name
        of the stransformer to use
    :param output_f: path to save the FAISS index to
    :param mode: whether to use "sent2vec" or "stransformers" (sentence-transformers)
    :param batch_size: batch_size to use to create sent2vec embeddings or sentence-transformers embeddings
    :param use_cuda: whether to use GPU when using sentence-transformers
    :return: the created FAISS index
    """
    if not FAISS_AVAILABLE:
        raise ImportError(
            "Faiss not installed. Please install the right version before continuing. If you have a "
            "CUDA-enabled device and want to use GPU acceleration, you can `pip install faiss-gpu`."
            " Otherwise, install faiss-cpu. For more, see https://github.com/facebookresearch/faiss"
        )

    if mode == "sent2vec":
        if not SENT2VEC_AVAILABLE:
            raise ImportError(
                "Requested 'sent2vec', but module not installed. Install the right version from"
                " https://github.com/epfml/sent2vec"
            )
        try:
            model = sent2vec.Sent2vecModel()
        except AttributeError as exc:
            raise AttributeError(
                "'sent2vec' does not have attribute Sent2vecModel. You may have uninstalled an"
                " incorrect version of sent2vec. The correct version can be found here:"
                " https://github.com/epfml/sent2vec"
            ) from exc
        logger.info(f"Loading sent2vec model of {model_name_or_path}")
        model.load_model(model_name_or_path, inference_mode=True)
        hidden_size = model.get_emb_size()
    elif mode == "stransformers":
        if not STRANSFORMERS_AVAILABLE:
            raise ImportError(
                "Requested 'stransformers', but module not installed. Please install the library"
                " before continuing. https://github.com/UKPLab/sentence-transformers#installation"
            )
        logger.info(f"Loading SentenceTransformer model {model_name_or_path}")
        model = SentenceTransformer(model_name_or_path, device="cuda" if use_cuda else "cpu")
        # Probe the model once to learn its embedding dimensionality.
        hidden_size = model.encode(["This is a test ."]).shape[1]
    else:
        raise ValueError("'mode' must be 'sent2vec' or 'stransformers'")

    logger.info(f"Creating empty index with hidden_size {hidden_size:,}...")
    # We want to do cosine similarity search, so we use inner product as suggested here:
    # https://github.com/facebookresearch/faiss/wiki/MetricType-and-distances#how-can-i-index-vectors-for-cosine-similarity
    index = faiss.index_factory(hidden_size, "Flat", faiss.METRIC_INNER_PRODUCT)

    vecs = []
    n_lines = get_n_lines(corpus_f)
    logger.info("Converting corpus into vectors. This can take a while...")
    batch = []
    with open(corpus_f, encoding="utf-8") as fhin:
        for line_idx, line in tqdm(enumerate(fhin, 1), total=n_lines, unit="line"):
            line = line.rstrip()
            if line:
                batch.append(line)
            # Flush a full batch, and also the final (possibly partial) one.
            if len(batch) == batch_size or line_idx == n_lines:
                if mode == "sent2vec":
                    # Normalize vectors for cosine distance as suggested here:
                    # https://github.com/facebookresearch/faiss/wiki/MetricType-and-distances#how-can-i-index-vectors-for-cosine-similarity
                    vecs.extend(model.embed_sentences(batch))
                else:
                    vecs.extend(model.encode(batch, batch_size=batch_size, show_progress_bar=False))
                batch = []

    logger.info(f"Number of entries: {len(vecs)}")
    logger.info("Normalizing vectors...")
    sent_vecs = np.array(vecs)
    # normalize_L2 works in-place so do not assign
    faiss.normalize_L2(sent_vecs)

    logger.info("Adding vectors to index...")
    index.add(sent_vecs)

    logger.info(f"Saving index to {output_f}...")
    faiss.write_index(index, output_f)

    return index
b757d55ecf3001cad2ad285f476a391cb013d8f4
3,647,969
# BUGFIX: `import importlib` does not guarantee the `importlib.util`
# submodule is loaded; import it explicitly.
import importlib.util


def import_module(name, path):
    """Dynamically import a module from a file path (Python 3 idiom).

    :param name: name to register the module instance under.
    :param path: filesystem path to the module source.
    :return: module: returned module instance.
    :raises ImportError: if no import spec can be created for ``path``.
    """
    spec = importlib.util.spec_from_file_location(name, path)
    # spec_from_file_location returns None (or a loader-less spec) for
    # unrecognized paths instead of raising -- fail loudly here rather
    # than with an opaque AttributeError below.
    if spec is None or spec.loader is None:
        raise ImportError(f"Cannot create an import spec for {path!r}")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
d78dc5bc9d3a121c53bdd3bc44ad57378976eb28
3,647,971
def response_ssml_text_and_prompt(output, endsession, reprompt_text):
    """Build an SSML response dict that includes a reprompt.

    :param output: text to speak immediately.
    :param endsession: whether the session should end after speaking.
    :param reprompt_text: text to speak if the user does not respond.
    :return: response dict in the Alexa skill response format.
    """
    def ssml_speech(text):
        # Both the main output and the reprompt share the same SSML envelope.
        return {'type': 'SSML', 'ssml': "<speak>" + text + "</speak>"}

    return {
        'outputSpeech': ssml_speech(output),
        'reprompt': {'outputSpeech': ssml_speech(reprompt_text)},
        'shouldEndSession': endsession,
    }
7cfa6b245bb80a29b10f3b972d1e9eb68377e836
3,647,972
import re


def getAreaQuantityQuantUnit(words):
    """Extract an area quantity and its unit from a token list.

    from training data:
                 count       perc  cum_sum    cum_perc
    kind_c
    hectare          7  58.333333        7   58.333333
    acre             2  16.666667        9   75.000000
    meter            1   8.333333       10   83.333333
    square-foot      1   8.333333       11   91.666667
    square-meter     1   8.333333       12  100.000000
    """
    allWords = ' '.join(words)
    # default unit when nothing matches
    unitKind = 'hectare'
    quant = None
    units = ['hectare', 'acre', 'euro', 'meter', 'square-foot', 'square-meter' ]
    # first literal unit word found wins ...
    for u in units:
        if u in allWords : unitKind=u; break
    # ... but two-word spellings override the single-word match
    if 'square foot' in allWords : unitKind='square-foot'
    if 'square feet' in allWords : unitKind='square-foot'
    if 'square meter' in allWords : unitKind='square-meter'
    # numeric quantity, possibly with thousands separators, e.g. "1,234.5"
    m = re.search(r'([0-9,\.]+)', allWords.lower())
    if m:
        quant = m.group(1)
        quant = quant.replace(",", "")
        quant = float(quant)
        # NOTE(review): dead check -- quant is a float here, never the string
        # '.'; and a bare '.' match would already raise ValueError in float()
        # above. This comparison was presumably meant to run before float().
        if quant=='.': quant=None
    if not quant:
        # no digits found: fall back to spelled-out numbers
        # (text2int is a project helper defined elsewhere)
        q = text2int(allWords)
        if q:
            quant = q
    else:
        # digits found: presumably text2int yields a spelled-out multiplier
        # here (e.g. "2 million") -- confirm text2int semantics.
        m = text2int(allWords)
        if m:
            quant *= m
    if not quant: quant = 1
    # render without trailing zeros / trailing decimal point
    quant = ('%f' % quant).rstrip('0').rstrip('.')
    return quant, unitKind
    #subGraph['attrDict_p'] = attrDict_p
10397a73042469a949fa6dbf70e8bba406cf510c
3,647,973
from typing import Optional
from typing import List
import pwd
import grp


def add_user(
    username: str,
    password: Optional[str] = None,
    shell: str = "/bin/bash",
    system_user: bool = False,
    primary_group: str = None,
    secondary_groups: List[str] = None,
    uid: int = None,
    home_dir: str = None,
) -> str:
    """Add a user to the system.

    Will log but otherwise succeed if the user already exists.

    Arguments:
        username: Username to create
        password: Password for user; if ``None``, create a system user
        shell: The default shell for the user
        system_user: Whether to create a login or system user
        primary_group: Primary group for user; defaults to username
        secondary_groups: Optional list of additional groups
        uid: UID for user being created
        home_dir: Home directory for user

    Returns:
        The password database entry struct, as returned by `pwd.getpwnam`
    """
    try:
        if uid:
            user_info = pwd.getpwuid(int(uid))
            # BUGFIX: the "already exists" log messages for the uid and
            # username lookups were swapped; each now reports what was
            # actually looked up.
            logger.info("user with uid '%d' already exists", uid)
            return user_info
        user_info = pwd.getpwnam(username)
        logger.info("user '%s' already exists", username)
        return user_info
    except KeyError:
        # Neither lookup matched -- create the account.
        logger.info("creating user '%s'", username)
        cmd = ["useradd", "--shell", shell]
        if uid:
            cmd.extend(["--uid", str(uid)])
        if home_dir:
            cmd.extend(["--home", str(home_dir)])
        if password:
            cmd.extend(["--password", password, "--create-home"])
        if system_user or password is None:
            cmd.append("--system")
        if not primary_group:
            try:
                grp.getgrnam(username)
                primary_group = username  # avoid "group exists" error
            except KeyError:
                pass
        if primary_group:
            cmd.extend(["-g", primary_group])
        if secondary_groups:
            cmd.extend(["-G", ",".join(secondary_groups)])
        cmd.append(username)
        # check_output/STDOUT come from subprocess (imported at module level).
        check_output(cmd, stderr=STDOUT)
        user_info = pwd.getpwnam(username)
        return user_info
17e9cc717f5ff63e65df202e05de88e703a9cf03
3,647,974
def tokenize_and_align(tokenizer, words, cased=False):
    """Splits up words into subword-level tokens.

    :param tokenizer: a BERT-style tokenizer exposing ``basic_tokenizer`` and
        ``wordpiece_tokenizer`` attributes (from the ``tokenization`` module,
        imported elsewhere in this file).
    :param words: iterable of word strings to tokenize.
    :param cased: if False, lower-case and strip accents before splitting.
    :return: list of lists -- one list of wordpiece tokens per input word,
        with [CLS]/[SEP] sentinels prepended/appended.
    """
    words = ["[CLS]"] + list(words) + ["[SEP]"]
    basic_tokenizer = tokenizer.basic_tokenizer
    tokenized_words = []
    for word in words:
        word = tokenization.convert_to_unicode(word)
        # NOTE(review): relies on private BasicTokenizer methods (_clean_text,
        # _run_strip_accents, _run_split_on_punc) -- brittle across versions.
        word = basic_tokenizer._clean_text(word)
        # Sentinel tokens must not be split on punctuation.
        if word == "[CLS]" or word == "[SEP]":
            word_toks = [word]
        else:
            if not cased:
                word = word.lower()
                word = basic_tokenizer._run_strip_accents(word)
            word_toks = basic_tokenizer._run_split_on_punc(word)
        tokenized_word = []
        for word_tok in word_toks:
            tokenized_word += tokenizer.wordpiece_tokenizer.tokenize(word_tok)
        tokenized_words.append(tokenized_word)
    # One (possibly empty) token list per input word -- alignment preserved.
    assert len(tokenized_words) == len(words)
    return tokenized_words
d6bd3fa2523b0f2422d6d0c2c87ac2637462542a
3,647,975
def _vagrant_format_results(line): """Extract fields from vm status line. :param line: Status line for a running vm :type line: str :return: (<vm directory path>, <vm status>) :rtype: tuple of strings """ line_split = line.split() return (line_split[-1], line_split[-2],)
78788572e6b695696621775c28ae8b3a1e577ee3
3,647,976
def rect_to_xys(rect, image_shape):
    """Convert rect to xys, i.e., eight points

    The `image_shape` is used to make sure all points returned are valid,
    i.e., within the image area.

    :param rect: rotated-rect parameters (cx, cy, w, h, angle).
    :param image_shape: array shape whose first two entries are (height, width).
    :return: flat array of 8 ints -- the four clamped corner coordinates.
    """
    h, w = image_shape[0:2]
    def get_valid_x(x):
        # clamp x into [0, w - 1]
        if x < 0:
            return 0
        if x >= w:
            return w - 1
        return x
    def get_valid_y(y):
        # clamp y into [0, h - 1]
        if y < 0:
            return 0
        if y >= h:
            return h - 1
        return y
    rect = ((rect[0], rect[1]), (rect[2], rect[3]), rect[4])
    # NOTE(review): cv2.cv.BoxPoints is the OpenCV 2.x API; OpenCV >= 3
    # renamed it to cv2.boxPoints -- confirm which version this file targets.
    points = cv2.cv.BoxPoints(rect)
    points = np.int0(points)
    for i_xy, (x, y) in enumerate(points):
        x = get_valid_x(x)
        y = get_valid_y(y)
        points[i_xy, :] = [x, y]
    points = np.reshape(points, -1)
    return points
a706007dc1651f1b8ce3c35b355b5b02915158e9
3,647,977
from typing import Optional


def determine_aws_service_name(
    request: Request, services: ServiceCatalog = get_service_catalog()
) -> Optional[str]:
    """
    Tries to determine the name of the AWS service an incoming request is targeting.

    :param request: to determine the target service name of
    :param services: service catalog (can be handed in for caching purposes);
        note the default is evaluated once at import time, which is the
        intended per-process cache.
    :return: service name string (or None if the targeting service could not be determined exactly)
    """
    signing_name, target_prefix, operation, host, path = _extract_service_indicators(request)
    candidates = set()

    # 1. check the signing names
    if signing_name:
        signing_name_candidates = services.by_signing_name(signing_name)
        if len(signing_name_candidates) == 1:
            # a unique signing-name -> service name mapping is the case for ~75% of service operations
            return signing_name_candidates[0]
        # try to find a match with the custom signing name rules
        custom_match = custom_signing_name_rules(signing_name, path)
        if custom_match:
            return custom_match
        # still ambiguous - add the services to the list of candidates
        candidates.update(signing_name_candidates)

    # 2. check the target prefix
    if target_prefix and operation:
        target_candidates = services.by_target_prefix(target_prefix)
        if len(target_candidates) == 1:
            # a unique target prefix
            return target_candidates[0]
        # still ambiguous - add the services to the list of candidates
        candidates.update(target_candidates)

        # exclude services where the operation is not contained in the service spec
        for service_name in list(candidates):
            service = services.get(service_name)
            if operation not in service.operation_names:
                candidates.remove(service_name)
    else:
        # exclude services which have a target prefix (the current request does not have one)
        for service_name in list(candidates):
            service = services.get(service_name)
            if service.metadata.get("targetPrefix") is not None:
                candidates.remove(service_name)

    if len(candidates) == 1:
        return candidates.pop()

    # 3. check the path
    if path:
        # iterate over the service spec's endpoint prefix
        for prefix, services_per_prefix in services.endpoint_prefix_index.items():
            if path.startswith(prefix):
                if len(services_per_prefix) == 1:
                    return services_per_prefix[0]
                candidates.update(services_per_prefix)

        # try to find a match with the custom path rules
        custom_path_match = custom_path_addressing_rules(path)
        if custom_path_match:
            return custom_path_match

    # 4. check the host (custom host addressing rules)
    if host:
        custom_host_match = custom_host_addressing_rules(host)
        if custom_host_match:
            return custom_host_match

    # 5. check the query / form-data
    values = request.values
    if "Action" in values and "Version" in values:
        # query / ec2 protocol requests always have an action and a version (the action is more significant)
        query_candidates = services.by_operation(values["Action"])
        if len(query_candidates) == 1:
            return query_candidates[0]
        for service in list(query_candidates):
            service_model = services.get(service)
            if values["Version"] != service_model.api_version:
                # the combination of Version and Action is not unique, add matches to the candidates
                query_candidates.remove(service)
        if len(query_candidates) == 1:
            return query_candidates[0]
        candidates.update(query_candidates)

    # 6. check the legacy rules in the end
    legacy_match = legacy_rules(request)
    if legacy_match:
        return legacy_match

    # No unique match: fall back to signing name, then to any remaining candidate.
    LOG.warning("could not uniquely determine service from request, candidates=%s", candidates)
    if signing_name:
        return signing_name
    if candidates:
        return candidates.pop()
    return None
3,647,978
from typing import Any


async def async_get_config_entry_diagnostics(
    hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
    """Return diagnostics for a config entry."""
    coordinator: DataUpdateCoordinator[Domain] = hass.data[DOMAIN][entry.entry_id]
    domain = coordinator.data
    # Expose a fixed, explicit subset of the domain data as diagnostics.
    return {
        field: getattr(domain, field)
        for field in (
            "creation_date",
            "expiration_date",
            "last_updated",
            "status",
            "statuses",
            "dnssec",
        )
    }
3aa4e17c646367721ffc266c31f66cd9f81c26fe
3,647,980
from typing import Dict


def binary_to_single(param_dict: Dict[str, float], star_index: int) -> Dict[str, float]:
    """Extract the parameters of one star from a binary-system dictionary.

    Per-star keys carry an ``_0`` or ``_1`` suffix; the key matching
    ``star_index`` is kept with the suffix stripped.  A fixed set of shared
    keys is copied through unchanged.

    Parameters
    ----------
    param_dict : dict
        Atmospheric parameters of both stars, with per-star keywords ending
        in ``_0`` or ``_1``.
    star_index : int
        Star index (0 or 1) selecting which parameters to extract.

    Returns
    -------
    dict
        Parameters of the selected star.
    """
    shared_keys = ("teff", "logg", "feh", "c_o_ratio", "fsed", "radius", "distance")
    suffix = str(star_index)

    single_dict = {}
    for key, value in param_dict.items():
        if star_index in (0, 1) and key[-1] == suffix:
            # per-star key: drop the "_0"/"_1" suffix
            single_dict[key[:-2]] = value
        elif key in shared_keys:
            # parameter shared by both stars
            single_dict[key] = value

    return single_dict
21099162ffe83715892abf82660e35ee98e02930
3,647,981
def welcome():
    """List all available api routes."""
    # Plain-text index of the API's endpoints, returned as one multi-line string.
    return (
        """Available Routes:
        /api/v1.0/precipitation
        Convert the query results to a dictionary using date as the key and prcp as the value.
        Return the JSON representation of your dictionary.
        /api/v1.0/stations
        Return a JSON list of stations from the dataset.
        /api/v1.0/tobs
        Return a JSON list of temperature observations (TOBS) for the previous year.
        /api/v1.0/start_date
        /api/v1.0/start_date/end_date
        Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
        When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.
        When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive.
        """
    )
83fcd43ff8dddd0596232dfb4420525bc592b583
3,647,982
import re


def armenian_input_latin(field, text):
    """
    Prepare a string from one of the query fields for subsequent processing:
    replace latin characters with Armenian equivalents.  Fields without
    transliteration support pass through unchanged.
    """
    if field not in ('wf', 'lex', 'lex2', 'trans_ru', 'trans_ru2'):
        return text

    def transliterate(chunk):
        # direct lookup first, then a case-restoring lookup, else keep as-is
        try:
            return dictLat2Arm[chunk]
        except KeyError:
            pass
        try:
            return dictLat2Arm[chunk.lower()].upper()
        except KeyError:
            return chunk

    # a character plus any trailing apostrophe/underscore modifiers, or a bare character
    chunks = re.findall(".['_]+|.", text)
    return ''.join(transliterate(chunk) for chunk in chunks)
94764ec931a4469ea0dca39a70880b41345ab7cf
3,647,983
from typing import Union
from typing import Tuple
from typing import Callable
from typing import Optional
import types


def df_style_cell(*styles: Union[
    Tuple[Callable[['cell'], bool], 'style'],
    Tuple['cell', 'style'],
    Callable[['cell'], Optional['style']],
]) -> Callable[['cell'], 'style']:
    """
    Shorthand for df.style.applymap(...).  Rules are tried in order; the first
    rule that yields a truthy style string wins, otherwise '' is returned.

    Example usage:
        df.style.applymap(df_style_cell(
            (lambda x: 0 < x < 1, 'color: red'),
            (0, 'color: green'),
            lambda x: 'background: %s' % to_rgb_hex(x),
        ))
    """
    def apply_styles(cell):
        for rule in styles:
            result = None
            if isinstance(rule, tuple):
                matcher, css = rule
                if isinstance(matcher, types.FunctionType):
                    # (predicate, style) pair
                    if matcher(cell):
                        result = css
                elif cell == matcher:
                    # (literal value, style) pair
                    result = css
            elif isinstance(rule, types.FunctionType):
                # bare function computing a style (or a falsy non-style)
                result = rule(cell)
            if result:
                return result
        return ''
    return apply_styles
e45d1b17ecd3bfe6bf05ba70e7ef0c8dc4b99a81
3,647,984
def bbox_transform(boxes, deltas, weights=(1.0, 1.0, 1.0, 1.0)):
    """Forward transform that maps proposal boxes to predicted ground-truth
    boxes using bounding-box regression deltas. See bbox_transform_inv for
    a description of the weights argument.

    :param boxes: (N, 4) array of proposals as (x1, y1, x2, y2).
    :param deltas: (N, 4*k) array of per-class deltas (dx, dy, dw, dh).
    :param weights: per-coordinate scaling divided out of the deltas.
    :return: (N, 4*k) array of predicted boxes in (x1, y1, x2, y2) form.
    """
    if boxes.shape[0] == 0:
        return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)

    boxes = boxes.astype(deltas.dtype, copy=False)

    # The "+ 1.0" follows the legacy pixel-inclusive box convention.
    widths = boxes[:, 2] - boxes[:, 0] + 1.0
    heights = boxes[:, 3] - boxes[:, 1] + 1.0
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights

    wx, wy, ww, wh = weights
    dx = deltas[:, 0::4] / wx
    dy = deltas[:, 1::4] / wy
    dw = deltas[:, 2::4] / ww
    dh = deltas[:, 3::4] / wh

    # Prevent sending too large values into np.exp()
    dw = np.minimum(dw, np.log(1000. / 16.))
    dh = np.minimum(dh, np.log(1000. / 16.))

    # Centers shift proportionally to box size; sizes scale exponentially.
    pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
    pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
    pred_w = np.exp(dw) * widths[:, np.newaxis]
    pred_h = np.exp(dh) * heights[:, np.newaxis]

    pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
    # x1
    pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
    # y1
    pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
    # x2 (note: "- 1" is correct; don't be fooled by the asymmetry)
    pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1
    # y2 (note: "- 1" is correct; don't be fooled by the asymmetry)
    pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1

    return pred_boxes
90e4cb394a12cbb73ce0dea85557b8195f04a961
3,647,985
def get_item_tds(item_id):
    """
    Connects to the ILS to retrieve item information and generates html
    table cells with the information.

    :param item_id: Item id
    :rtype: HTML string (marked safe for template rendering)
    """
    # ItemBot (project helper) queries the OPAC endpoint for this item's
    # live status, location and call number.
    item_bot = ItemBot(opac_url=ils_settings.OPAC_URL,item_id=item_id)
    output_html = "<td>{0}</td><td>{1}</td><td>{2}</td>".format(item_bot.status(), item_bot.location(), item_bot.callnumber())
    return mark_safe(output_html)
866e364257aae174a7ddecdaa94f5af1e9cbfcca
3,647,986
def build_data_request(mac, request_type='current', interval=1, units='english'):
    """Build a RainWise API request URL for station data.

    :param mac: station MAC address identifying the RainWise station
    :param request_type: 'current' for current conditions, 'recent' for recent data
    :param interval: sampling interval in minutes (1, 5, 10, 15, 30 or 60);
        only used when request_type is 'recent'
    :param units: 'english' or 'metric' (case-insensitive); only used when
        request_type is 'recent'
    :return: the request URL string
    :raises ValueError: if request_type, interval or units is invalid
    """
    # Validate request_type first (previously checked last, after the other
    # parameters, and with a malformed message -- adjacent string literals
    # dropped the intended quotes around 'current'/'recent').
    if request_type not in ('current', 'recent'):
        raise ValueError("Invalid Request: Parameter request_type must be either 'current' or 'recent'")

    # Check if interval requested is valid interval
    if interval not in (1, 5, 10, 15, 30, 60):
        raise ValueError('Invalid Request: Parameter interval must be 1, 5, 10, 15, 30, or 60')

    # Check if units requested are valid units
    if units.lower() not in ('english', 'metric'):
        raise ValueError('Invalid Request: Parameter units must be english or metric')

    # Build request URL for current conditions
    if request_type == 'current':
        return f'http://api.rainwise.net/main/v1.4/get-data.php?mac={mac}&format=json'

    # Build request URL for recent data
    return f'http://api.rainwise.net/main/v1.4/get-recent.php?mac={mac}&interval={interval}&units={units}&format=json'
733c20f5c67fe2c630427bfb70ab563df111558c
3,647,988
def load_acs_access_to_car() -> pd.DataFrame:
    """Function to merge the two files for the QOL outputs and do some standard
    renaming. Because these are QOL indicators they remain in the same csv
    output with columns indicating year.

    Returns a DataFrame indexed by geography code, keeping only the Wk16p and
    CWCar indicator columns from each ACS period.
    """
    # ACS 5-year estimates for the two comparison periods; Geog read as str
    # to keep geography codes intact.
    df_0812 = pd.read_excel(
        "./resources/ACS_PUMS/EDDT_ACS2008-2012.xlsx",
        sheet_name="ACS08-12",
        dtype={"Geog": str},
    )
    df_1519 = pd.read_excel(
        "./resources/ACS_PUMS/EDDT_ACS2015-2019.xlsx",
        sheet_name="ACS15-19",
        dtype={"Geog": str},
    )
    df = pd.merge(df_0812, df_1519, on="Geog", how="left")
    # Keep the geography key plus the Wk16p / CWCar indicator columns.
    # NOTE(review): presumably workers-16+ and commute-by-car columns --
    # confirm against the source workbook's column dictionary.
    df = df.filter(regex="Geog|Wk16p|CWCar")
    # Normalize borough names to the short codes used in the outputs.
    df = df.replace(
        {
            "Geog": {
                "Bronx": "BX",
                "Brooklyn": "BK",
                "Manhattan": "MN",
                "Queens": "QN",
                "Staten Island": "SI",
                "NYC": "citywide",
            }
        }
    )
    df.set_index("Geog", inplace=True)
    return df
fa6d606ebeef142417f1ac47c18947d0de08065b
3,647,989
def elastic_transform(image, alpha, sigma, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_.

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.

    :param image: 2-D array to deform.
    :param alpha: scale factor for the displacement field magnitude.
    :param sigma: Gaussian-filter standard deviation; larger means smoother.
    :param random_state: optional np.random.RandomState for reproducibility.
    :return: deformed image with the same shape as the input.
    """
    assert len(image.shape) == 2

    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    # Random displacement fields in [-1, 1), smoothed then scaled by alpha.
    # NOTE(review): gaussian_filter / map_coordinates are presumably from
    # scipy.ndimage, imported elsewhere in the file.
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha

    x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
    indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))

    # Bilinear (order=1) resampling at the displaced coordinates.
    return map_coordinates(image, indices, order=1).reshape(shape)
e01660ecf753d7c33aa66786cf9f9db3b94cef49
3,647,991
import time


def convert_epoch_to_mysql_timestamp(epoch_timestamp):
    """
    Converts a given epoch timestamp in seconds to the MySQL datetime format.

    :param epoch_timestamp: The timestamp as seconds since epoch time
    :return: The MySQL timestamp string in the format 'Y-m-d HH:MM:SS',
        or None when the conversion fails
    :rtype: str
    """
    try:
        # gmtime: interpret the epoch seconds as UTC
        return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(epoch_timestamp))
    except Exception as err:
        print(err)
        return None
15647a816e638e7668e2e830ebc4f1c6fdb2f030
3,647,992
import six


def _ensure_eventlet(func):
    """Decorator that verifies we have the needed eventlet components."""
    @six.wraps(func)
    def wrapper(*args, **kwargs):
        # _utils.EVENTLET_AVAILABLE and greenthreading are module-level names
        # set elsewhere in this file (presumably at import time, depending on
        # whether eventlet imported cleanly -- confirm).
        if not _utils.EVENTLET_AVAILABLE or greenthreading is None:
            raise RuntimeError('Eventlet is needed to wait on green futures')
        return func(*args, **kwargs)
    return wrapper
4193b8d68ae45c13a3a88b1e4c7caba5572f16cf
3,647,993
def check_public_key(pk):
    """
    Checks if a given string is a public (or at least if it is formatted as if it is).

    :param pk: ECDSA public key to be checked.
    :type pk: hex str
    :return: True if the key matches the format, raise exception otherwise.
    :rtype: bool
    :raises ValueError: if the prefix or length is wrong.
    """
    prefix = pk[0:2]
    length = len(pk)

    # 02/03 mark compressed keys (x coordinate only); 04 marks uncompressed.
    if prefix not in ["02", "03", "04"]:
        # ValueError instead of bare Exception: still caught by existing
        # `except Exception` callers, but far more specific.
        raise ValueError("Wrong public key format.")
    if prefix == "04" and length != 130:
        # 04 prefix + 64-byte (x, y) point = 130 hex characters.
        raise ValueError(
            "Wrong length for an uncompressed public key: " + str(length))
    elif prefix in ["02", "03"] and length != 66:
        # 02/03 prefix + 32-byte x coordinate = 66 hex characters.
        raise ValueError("Wrong length for a compressed public key: " + str(length))
    else:
        return True
120b3e88a96db45e5e4df0996414448da8b84462
3,647,995
def empty_tree(input_list):
    """Return True when the nested list structure contains no non-list values.

    Every element, at every depth, must itself be a list; any scalar value
    anywhere in the tree makes the result False.
    """
    return all(isinstance(item, list) and empty_tree(item) for item in input_list)
1dceb351aac4db23b57394a531db38a3edf61a8c
3,647,996