content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def doColorTransfer(org_content, output, raw_data, with_color_match = False):
    """Transfer the luminance of `output` onto the colors of `org_content`.

    org_content: path or 0-1 np array -- source of the color information.
    output: path or 0-1 np array -- source of the luminance/detail.
    raw_data: boolean | toggles input -- True means both inputs are already
        0-1 float arrays; False means they are file paths to be loaded.
    with_color_match: if True, first match the color statistics of `output`
        to `org_content` (via `match_color`) before the luminance swap.

    Returns a 0-1 float RGB array clamped to [0, 1].
    """
    if not raw_data:
        # NOTE(review): division by 256 (not 255) keeps values strictly below
        # 1.0 -- presumably intentional; confirm against callers.
        org_content = imageio.imread(org_content, pilmode="RGB").astype(float)/256
        output = imageio.imread(output, pilmode="RGB").astype(float)/256
    # Resize the color source to the output's spatial dimensions.
    org_content = skimage.transform.resize(org_content, output.shape)
    if with_color_match:
        output = match_color(output, org_content)
    # Work in Luv so luminance can be replaced independently of chromaticity.
    org_content = rgb2luv(org_content)
    # Replace the L channel with the per-pixel mean of output's RGB channels.
    org_content[:,:,0] = output.mean(2)
    output = luv2rgb(org_content)
    # Clamp back into the valid 0-1 range.
    output[output<0] = 0
    output[output>1]=1
    return output
8af9d8dc199fb4b111da619e4dbc935b2514c5b7
3,647,307
def tb_filename(tb):
    """Return the source filename of the frame a traceback object points at."""
    frame = tb.tb_frame
    return frame.f_code.co_filename
75ac527b928d605f1dfc2b5034da6ab7e193fb82
3,647,308
async def fetch_symbol(symbol: str):
    """Get symbol info.

    Looks up `symbol` in the Symbol table and returns a dict with its
    `symbol` and `name` fields.

    NOTE(review): if no row matches, `s` is None and the attribute access
    raises AttributeError -- confirm whether callers rely on that.
    """
    db = SessionLocal()
    try:
        s = db.query(SymbolSchema).filter(SymbolSchema.symbol == symbol).first()
        res = {"symbol": s.symbol, "name": s.name}
        return res
    finally:
        # Bug fix: the session was never closed, leaking a DB connection
        # per request.
        db.close()
6e6a1e4e92ba7796f1893d3144cbdcc75bee6bbe
3,647,309
from typing import List

def all_live_response_sessions(cb: CbResponseAPI) -> List:
    """List all LR sessions still in server memory.

    Args:
        cb: An authenticated CbResponseAPI client.

    Returns:
        A list of session objects returned by the LR session endpoint.
    """
    # list() replaces the redundant identity comprehension
    # `[sesh for sesh in ...]` (same result, clearer intent).
    return list(cb.get_object(f"{CBLR_BASE}/session"))
e4ea25c8d38e90e8048f6a5c220e5f413ce59da6
3,647,310
def unserialize_model_params(bin: bytes):
    """Unserializes model or checkpoint or diff stored in db to list of tensors.

    bin: protobuf-serialized ``StatePB`` blob as stored in the database.
        NOTE(review): the parameter name shadows the builtin ``bin()``.
    Returns the list of tensors held by the deserialized syft State.
    """
    state = StatePB()
    state.ParseFromString(bin)
    # A throwaway worker is required by syft's protobuf deserializer.
    worker = sy.VirtualWorker(hook=None)
    state = protobuf.serde._unbufferize(worker, state)
    model_params = state.tensors()
    return model_params
a1cad2172029b7e622486d50de73a7c87aaca9c0
3,647,311
def config(clazz):
    """Decorator allowing to transform a python object into a configuration file,
    and vice versa

    :param clazz: class to decorate
    :return: the decorated class
    """
    # Turn the class into a dataclass, then round-trip it through the
    # serialize/deserialize helpers so it gains their config behaviour.
    return deserialize(serialize(dataclass(clazz)))
a5386d53c596b77355ee8e5067a0e1c8b4efb89e
3,647,312
import functools

def wrap_method_once(func: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]:
    """manage Runnable state for given method

    Wraps ``func`` so that, when invoked as a bound ``Runnable`` method, the
    call is routed through ``self._call_wrapped_method``; plain function
    calls pass through unchanged.  Wrapping is idempotent.
    """
    # we don't re-wrap methods that had the state management wrapper
    if hasattr(func, 'handler_wrapped'):
        return func

    @functools.wraps(func)
    def wrapped_runnable_method(*args, **kw):
        # check the first args, if it is self, otherwise call the wrapped function
        # we might wrapped a callable that is not a method
        if args and isinstance(args[0], Runnable):
            self, args = args[0], args[1:]
            return self._call_wrapped_method(func, *args, **kw)
        else:
            return func(*args, **kw)

    # Marker attribute checked above to guarantee single wrapping.
    wrapped_runnable_method.handler_wrapped = True
    return wrapped_runnable_method
cf26f642f09fe5d4b54073eeda4ab7beb03ed538
3,647,313
import typing as ty
from typing import TypeVar

# Bug fix: the original `from re import T` imported re.TEMPLATE (a regex
# flag), not a type variable. Define a real TypeVar instead.
T = TypeVar("T")

async def all(iterable: ty.AsyncIterator[T]) -> bool:
    """Return ``True`` if **all** elements of the iterable are true (or if
    the iterable is empty).

    :param iterable: The asynchronous iterable to be checked.
    :type iterable: ~typing.AsyncIterator
    :returns: Whether all elements of the iterable are true or if the
        iterable is empty.
    :rtype: bool
    """
    # Bug fix: `iter()` does not accept async iterators and raised
    # TypeError at runtime; iterate the async iterable directly.
    async for x in iterable:
        if not x:
            return False
    return True
92c8c20d75318a0b0353ede9641c59d9337c60f6
3,647,314
def _safe_isnan(x): """Wrapper for isnan() so it won't fail on non-numeric values.""" try: return isnan(x) except TypeError: return False
7f4cb2e2f4c3e4ee9d66e6d8dbb73dcac5f343f0
3,647,315
def get_system( context, system_id = None ):
    """
    Finds a system matching the given identifier and returns its resource

    Args:
        context: The Redfish client object with an open session
        system_id: The system to locate; if None, perform on the only system

    Returns:
        The system resource

    Raises:
        RedfishSystemNotFoundError: if the system cannot be located, or no
            identifier was given and the service has more than one system
    """
    system_uri_pattern = "/redfish/v1/Systems/{}"
    avail_systems = None

    # If given an identifier, get the system directly
    if system_id is not None:
        system = context.get( system_uri_pattern.format( system_id ) )
    # No identifier given; see if there's exactly one member
    else:
        avail_systems = get_system_ids( context )
        if len( avail_systems ) == 1:
            system = context.get( system_uri_pattern.format( avail_systems[0] ) )
        else:
            raise RedfishSystemNotFoundError( "Service does not contain exactly one system; a target system needs to be specified: {}".format( ", ".join( avail_systems ) ) )

    # Check the response and return the system if the response is good
    try:
        verify_response( system )
    # Bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only translate real verification failures.
    except Exception:
        if avail_systems is None:
            avail_systems = get_system_ids( context )
        raise RedfishSystemNotFoundError( "Service does not contain a system called {}; valid systems: {}".format( system_id, ", ".join( avail_systems ) ) ) from None
    return system
7127a5cf0df89ba5832b7328746d29ef34c12c3e
3,647,316
import math

def cartesian_to_polar(x, y, xorigin=0.0, yorigin=0.0):
    """

    Helper function to convert Cartesian coordinates to polar
    coordinates (centred at a defined origin). In the polar
    coordinates, theta is an angle measured clockwise from the Y axis.

    :Parameters:

    x: float
        X coordinate of point
    y: float
        Y coordinate of point
    xorigin: float (optional)
        X coordinate of origin (if not zero)
    yorigin: float (optional)
        Y coordinate of origin (if not zero)

    :Returns:

    (r, theta): tuple of 2 floats
        Polar coordinates of point. NOTE: theta is in radians.

    """
    # Previously relied on module globals PIBY2 / PI2 that were defined
    # elsewhere; compute them explicitly so the function is self-contained.
    piby2 = math.pi / 2.0
    pi2 = 2.0 * math.pi

    xdiff = float(x) - float(xorigin)
    ydiff = float(y) - float(yorigin)
    distsq = (xdiff * xdiff) + (ydiff * ydiff)
    r = math.sqrt(distsq)

    # atan2 measures counter-clockwise from +X; convert to clockwise from +Y.
    theta = piby2 - math.atan2(ydiff, xdiff)

    # Adjust theta to be in the range 0 - 2*PI
    while theta < 0.0:
        theta += pi2
    while theta > pi2:
        theta -= pi2

    return (r, theta)
3fcbb0fc18b9c8d07bc1394995600fd354ee3c75
3,647,317
def keypoint_dict_to_struct(keypoint_dict):
    """ parse a keypoint dictionary form into a keypoint info structure

    keypoint_dict may contain:
      - 'keypoints': iterable of (x, y) pairs (optional)
      - 'jacobians': iterable of 2x2 matrices (optional)
      - 'pts', 'index': required fields copied verbatim
    Returns a populated ``KeypointInfo`` structure.
    """
    keypoint_info_struct = KeypointInfo()
    if 'keypoints' in keypoint_dict:
        for k in keypoint_dict['keypoints']:
            keypoint = keypoint_info_struct.keypoints.add()
            keypoint.xloc = k[0]
            keypoint.yloc = k[1]
    if 'jacobians' in keypoint_dict:
        for j in keypoint_dict['jacobians']:
            jacobian = keypoint_info_struct.jacobians.add()
            # Row-major copy of the 2x2 jacobian.
            jacobian.d11 = j[0][0]
            jacobian.d12 = j[0][1]
            jacobian.d21 = j[1][0]
            jacobian.d22 = j[1][1]
    # NOTE(review): 'pts' and 'index' are accessed unconditionally -- callers
    # must always supply them (KeyError otherwise).
    keypoint_info_struct.pts = keypoint_dict['pts']
    keypoint_info_struct.index = keypoint_dict['index']
    return keypoint_info_struct
d805e6fbac9df7a3a8d7d7f73a03f63a2a4149d1
3,647,318
def softmax_regression(img):
    """Softmax classifier: a single fully-connected layer with a softmax
    activation yields the classification result.

    Args:
        img -- the raw input image data

    Return:
        predict -- the classification result (10-way softmax probabilities)
    """
    predict = paddle.layer.fc(
        input=img, size=10, act=paddle.activation.Softmax())
    return predict
0c522589a2130bbba512d557695f432eea50ef48
3,647,319
from typing import Optional
from typing import Union
from typing import Tuple

def incremental_quality(
    wavelength: ndarray,
    flux: ndarray,
    *,
    mask: Optional[Union[Quantity, ndarray]] = None,
    percent: Union[int, float] = 10,
    **kwargs,
) -> Tuple[ndarray, ndarray]:
    """Determine spectral quality in incremental sections.

    Parameters
    ----------
    wavelength: array-like or Quantity
        Wavelength of spectrum.
    flux: array-like or Quantity
        Flux of spectrum.
    mask: array-like, Quantity or None
        Pixel weight mask.
    percent: Union[int, float] (default=10)
        The percent size of chunk around each wavelength position.
    kwargs:
        Extra arguments passed onto quality() (including mask).

    Returns
    -------
    x: ndarray
        Central wavelength values of each section.
    q: ndarray
        Spectral quality for each section.
    """
    positions = log_chunks(wavelength, percent)
    qualities = []
    for pos1, pos2 in zip(positions[:-1], positions[1:]):
        pos_mask = (wavelength >= pos1) & (wavelength < pos2)
        if np.sum(pos_mask) <= 1:
            # 1 or less points in this section
            continue
        x = wavelength[pos_mask]
        y = flux[pos_mask]
        if mask is not None:
            z = mask[pos_mask]
        else:
            z = mask  # None
        try:
            q = quality(x, y, mask=z, **kwargs)
        # Bug fix: was a bare `except:` which also caught SystemExit and
        # KeyboardInterrupt; only convert real failures into NaN.
        except Exception:
            q = np.nan
        qualities.append([np.nanmean(x), q])

    # Bug fix: with no usable sections, `np.asarray([]).T` cannot be
    # unpacked into two values (ValueError); return empty arrays instead.
    if not qualities:
        return np.array([]), np.array([])
    x, q = np.asarray(qualities).T
    return x, q
69f7966af1bec5cde40d62e42346a28475c120f6
3,647,320
import platform
from collections import defaultdict

def get_platform():
    """Gets the users operating system.

    Returns:
        An `int` representing the users operating system.
        0: Windows x86 (32 bit)
        1: Windows x64 (64 bit)
        2: Mac OS
        3: Linux
        If the operating system is unknown, -1 will be returned.
    """
    return defaultdict(lambda: -1, {
        "Windows": 1 if platform.machine().endswith("64") else 0,
        "Darwin": 2,
        # Bug fix: the key was "Linux:" (stray colon), so Linux always
        # fell through to the -1 default.
        "Linux": 3,
    })[platform.system()]
2d77b76e7a7010dc1f104e83a7a042b7b7542954
3,647,321
def count_number_of_digits_with_unique_segment_numbers(displays: str) -> int:
    """Counts the number of 1, 4, 7 or 8s in the displays."""
    # Digits 1, 7, 4 and 8 light a unique number of segments: 2, 3, 4 and 7.
    unique_segment_counts = (2, 3, 4, 7)
    total = 0
    for line in displays.splitlines():
        display = Display.from_string(line)
        total += sum(
            len(code) in unique_segment_counts for code in display.output_value
        )
    return total
df9731371066cc89445fd5eeb94f40624cf24a2f
3,647,322
def svo_filter_url(telescope, photofilter, zeropoint='AB'):
    """Build the SVO URL where the filter transmission curve is hiding.

    Requires arguments:
    telescope: SVO-like name of Telescope/Source of photometric system.
    photofilter: SVO-like name of photometric filter.

    Optional:
    zeropoint: String. Either 'AB', 'Vega', or 'ST'.

    Output:
    url: URL of the relevant file.
    """
    base = 'http://svo2.cab.inta-csic.es/theory/fps3/fps.php?'
    phot_cal_id = '/'.join((telescope, photofilter, zeropoint))
    return base + 'PhotCalID=' + phot_cal_id
e3cbe6a3192fcc890fb15df8fc3c02620a7c69fb
3,647,323
def calculate_sampling_rate(timestamps):
    """Estimate the sampling rate from a sequence of timestamps.

    Parameters
    ----------
    timestamps : array_like
        Either floats (unit: seconds) or datetime-like strings that
        ``parse_datetime`` can parse.

    Returns
    -------
    float or None
        The sampling rate (1 / smallest non-zero step), or None if the
        timestamps could not be parsed.
    """
    if isinstance(timestamps[0], float):
        # Already numeric seconds; use as-is.
        timestamps_second = timestamps
    else:
        try:
            v_parse_datetime = np.vectorize(parse_datetime)
            timestamps = v_parse_datetime(timestamps)
            # Convert to seconds elapsed since the first sample.
            timestamps_second = [0]
            for i in range(1, len(timestamps)):
                timestamps_second.append(
                    (timestamps[i] - timestamps[i - 1]).total_seconds())
        except Exception:
            # Bug fix: previously only set sampling_rate = None and fell
            # through, hitting a NameError on the undefined
            # timestamps_second below. Return None explicitly.
            return None
    steps = np.diff(timestamps_second)
    # Use the smallest non-zero step so duplicate timestamps are ignored.
    sampling_rate = round(1 / np.min(steps[steps != 0]))
    return sampling_rate
0554866b55eb9310c64399819d8d3978108d5e1a
3,647,324
def compute_ss0(y, folds):
    """
    Compute the sum of squares based on null models (i.e., always predict
    the average `y` of the training data).

    Parameters
    ----------
    y : ndarray
    folds : list of ndarray
        Each element is an ndarray of integers, which are the indices of
        members of the fold.

    Returns
    -------
    ss0 : float
        The sum of squares based on null models.
    """
    null_predictions = np.zeros_like(y)
    for held_out in folds:
        # Predict each fold with the mean of everything outside it.
        train_mask = np.ones_like(y, dtype=bool)
        train_mask[held_out] = False
        null_predictions[held_out] = y[train_mask].mean()
    return np.sum((y - null_predictions) ** 2)
e546404eaa2637cc8073ddc0654f42987a07d972
3,647,325
import glob

def read_logging_data(folder_path):
    """ Description:\n
    This function reads all csv files in the folder_path folder into one
    dataframe. The files should be in csv format and the name of each file
    should start with 'yrt' and contain '_food_data' in the middle. For
    example, yrt1999_food_data123.csv is a valid file name that would be
    read into the dataframe if it exists in the folder_path folder.

    Input:\n
    - folder_path(string) : path to the folder that contain the data.

    Output:\n
    - a dataframe contains all the csv files in the folder given.
    """
    pattern = '{}/yrt*_food_data*.csv'.format(folder_path)
    frames = [pd.read_csv(csv_path) for csv_path in glob.glob(pattern)]
    combined = pd.concat(frames)
    return combined.reset_index(drop=True)
964ad379eb70083b6a696655b22c77a2c61d101f
3,647,326
def get_user_strlist_options(*args):
    """
    get_user_strlist_options(out)

    Thin SWIG-style wrapper: forwards all arguments unchanged to the
    underlying ``_ida_kernwin.get_user_strlist_options`` C binding.
    """
    return _ida_kernwin.get_user_strlist_options(*args)
71c637c1d663d685ffd7e5fd5bba8cba04b18535
3,647,327
import torch

def coord_sampler(img, coords):
    """Sample an image batch at integer (x, y) coordinates.

    img: [B,C,H,W] tensor, coords: [B,2,N] integer tensor (row 0 = x, row 1 = y).
    Returns a [B,C,N] tensor of sampled points.
    """
    batch, _, _, _ = img.shape
    num_points = coords.shape[2]
    # One batch index per (batch, point) pair -> shape [B, N].
    batch_idx = torch.meshgrid(torch.arange(batch), torch.arange(num_points))[0]
    xs = coords[:, 0, :]
    ys = coords[:, 1, :]
    # Advanced indexing over dims 0, 2, 3 keeps the channel dim -> [B, N, C].
    sampled = img[batch_idx, :, ys, xs]
    return sampled.permute(0, 2, 1)
d4a1ac6125d11381933d59190074f33bd9a7e774
3,647,328
def adapt_ListOfX(adapt_X):
    """This will create a multi-column adapter for a particular type.

    Note that the type must itself need to be in array form. Therefore this
    function serves to separate out individual lists into multiple big lists.
    E.g. if the X adapter produces array (a,b,c) then this adapter will take a
    list of Xs and produce a master array: ((a1,a2,a3),(b1,b2,b3),(c1,c2,c3))

    Takes as its argument the adapter for the type which must produce an SQL
    array string. Note that you should NOT put the AsIs in the adapt_X
    function.

    The need for this function arises from the fact that we may want to
    actually handle list-creating types differently if they themselves are in
    a list, as in the example above, we cannot simply adopt a recursive
    strategy.

    Note that master_list is the list representing the array. Each element in
    the list will represent a subarray (column). If there is only one subarray
    following processing then the outer {} are stripped to give a 1
    dimensional array.
    """
    def adapter_function(param):
        # AsIs is psycopg2's pass-through wrapper; falsy only when the
        # psycopg2 import failed at module load time.
        if not AsIs:
            raise ImportError('There was a problem importing psycopg2.')
        param = param.value
        result_list = []
        for element in param:  # Where param will be a list of X's
            result_list.append(adapt_X(element))
        # Use the first adapted element to discover how many columns
        # (comma-separated items) the X adapter produces.
        test_element = result_list[0]
        num_items = len(test_element.split(","))
        master_list = []
        for x in range(num_items):
            master_list.append("")
        # Transpose: spread each element's columns into per-column strings.
        for element in result_list:
            element = element.strip("{").strip("}")
            element = element.split(",")
            for x in range(num_items):
                master_list[x] = master_list[x] + element[x] + ","
        # Only wrap in outer braces when there is more than one column.
        if num_items > 1:
            master_sql_string = "{"
        else:
            master_sql_string = ""
        for x in range(num_items):
            # Remove trailing comma
            master_list[x] = master_list[x].strip(",")
            master_list[x] = "{" + master_list[x] + "}"
            master_sql_string = master_sql_string + master_list[x] + ","
        master_sql_string = master_sql_string.strip(",")
        if num_items > 1:
            master_sql_string = master_sql_string + "}"
        return AsIs("'{}'".format(master_sql_string))
    return adapter_function
15a6ada60c3b78110097c29222c245225a2669b9
3,647,330
def add_sighting(pokemon_name):
    """Add new sighting to a user's Pokédex.

    Looks up the logged-in user and the named Pokémon, possibly swaps the
    sighting for Ditto, records it if not already seen, and redirects back
    to the user's page either way.
    """
    user_id = session.get('user_id')
    # Aborts with 404 if the session's user does not exist; the returned
    # object itself is not used further.
    user = User.query.get_or_404(user_id)
    pokemon = Pokemon.query.filter_by(name=pokemon_name).first_or_404()
    # 16% chance logging a sighting of a Pokémon with ditto_chance = True will
    # instead be logged as a Ditto
    # Through manual spamming I tested this, and it does work!
    if pokemon.chance_of_ditto():
        pokemon = Pokemon.query.filter_by(name='Ditto').first_or_404()
    pokemon_id = pokemon.pokemon_id
    user_sighting = Sighting.query.filter(
        (Sighting.user_id == user_id) &
        (Sighting.pokemon_id == pokemon_id)).one_or_none()
    # Ensuring unique Pokémon only in a user's sightings
    if user_sighting is None:
        new_sighting = Sighting(user_id=user_id, pokemon_id=pokemon_id)
        new_sighting.save()
        flash('Professor Willow: Wonderful! Your work is impeccable. Keep up the good work!')
        return redirect(f'/user/{user_id}')
    else:
        flash('Professor Willow: You\'ve already seen this Pokémon!')
        return redirect(f'/user/{user_id}')
cbc078b71d31bb40f582731341aefd89e23f26dc
3,647,331
def edit_colors_names_group(colors, names):
    """
    idx map to colors and its names.
    names index is 1 increment up based on new indexes (only 0 - 7)
    """
    # Class palette: index i gets colors[i] and names[i + 1].
    palette = [
        ('wall', (120, 120, 120)),
        ('floor', (80, 50, 50)),
        ('plant', (4, 200, 3)),
        ('ceiling', (120, 120, 80)),
        ('furniture', (204, 5, 255)),
        ('person', (150, 5, 61)),
        ('door', (8, 255, 51)),
        ('objects', (6, 230, 230)),
    ]
    for idx, (label, rgb) in enumerate(palette):
        colors[idx] = np.array(rgb, dtype=np.uint8)
        names[idx + 1] = label
    return colors, names
45e6977b2ff4339417900d31d8bd22e9c5d934d8
3,647,332
def semidoc_mass_dataset(params,
                         file_names,
                         num_hosts,
                         num_core_per_host,
                         seq_len,
                         num_predict,
                         is_training,
                         use_bfloat16=False,
                         num_threads=64,
                         record_shuffle_size=256,
                         sequence_shuffle_size=2048):
  # pylint: disable=g-doc-args
  """Get semi-doc level mass dataset.

  Notes:
  - Each sequence comes from the same document (except for boundary cases).
    This is different from the standard sent-level mass dataset.
  - No consecutivity is ensured across batches, which is different from the
    standard doc-level mass dataset.
  - Effectively, semi-doc dataset maintains short range (seq_len)
    dependency, which is more random than doc-level and less random than
    sent-level.

  Returns:
    a tf.data.Dataset
  """
  # pylint: enable=g-doc-args
  bsz_per_core = params["batch_size"]
  if num_hosts > 1:
    host_id = params["context"].current_host
  else:
    host_id = 0

  ##### Split input files across hosts
  if len(file_names) >= num_hosts:
    # Round-robin sharding so each host reads a disjoint subset of files.
    file_paths = file_names[host_id::num_hosts]
  else:
    # Fewer files than hosts: every host reads everything.
    file_paths = file_names
  tf.logging.info("Host %d handles %d files:", host_id, len(file_paths))

  ##### Parse records
  dataset = tf.data.Dataset.from_tensor_slices(file_paths)
  dataset = parse_record(dataset=dataset,
                         parser=get_record_parser(),
                         is_training=is_training,
                         num_threads=num_threads,
                         file_shuffle_size=len(file_paths),
                         record_shuffle_size=record_shuffle_size)

  # process dataset
  dataset = mass_process(dataset, seq_len, num_predict, use_bfloat16)

  # Sequence level shuffle
  if is_training and sequence_shuffle_size:
    tf.logging.info("Seqeunce level shuffle with size %d",
                    sequence_shuffle_size)
    dataset = dataset.shuffle(buffer_size=sequence_shuffle_size)

  # batching
  dataset = dataset.batch(bsz_per_core, drop_remainder=True)

  # Prefetch
  dataset = dataset.prefetch(num_core_per_host)

  return dataset
9c5809268931c1235bdd32c40f89781b05f387ae
3,647,333
def ListChrootSnapshots(buildroot):
  """Wrapper around cros_sdk --snapshot-list."""
  result = RunBuildScript(
      buildroot, ['cros_sdk', '--snapshot-list'],
      chromite_cmd=True, stdout=True)
  return result.output.splitlines()
5ae25b11dd0dba39834411a05c095fd38ea70494
3,647,334
def nmad_filter( dh_array: np.ndarray, inlier_mask: np.ndarray, nmad_factor: float = 5, max_iter: int = 20, verbose: bool = False ) -> np.ndarray: """ Iteratively remove pixels where the elevation difference (dh_array) in stable terrain (inlier_mask) is larger \ than nmad_factor * NMAD. Iterations will stop either when the NMAD change is less than 0.1, or after max_iter iterations. :params dh_array: 2D array of elevation difference. :params inlier_mask: 2D boolean array of areas to include in the analysis (inliers=True). :param nmad_factor: The factor by which the stable dh NMAD has to be multiplied to calculate the outlier threshold :param max_iter: Maximum number of iterations (normally not reached, just for safety) :param verbose: set to True to print some statistics to screen. :returns: 2D boolean array with updated inliers set to True """ # Mask unstable terrain dh_stable = dh_array.copy() dh_stable.mask[~inlier_mask] = True nmad_before = xdem.spatialstats.nmad(dh_stable) if verbose: print(f"NMAD before: {nmad_before:.2f}") print("Iteratively remove large outliers") # Iteratively remove large outliers for i in range(max_iter): outlier_threshold = nmad_factor * nmad_before dh_stable.mask[np.abs(dh_stable) > outlier_threshold] = True nmad_after = xdem.spatialstats.nmad(dh_stable) if verbose: print(f"Remove pixels where abs(value) > {outlier_threshold:.2f} -> New NMAD: {nmad_after:.2f}") # If NMAD change is loweer than a set threshold, stop iterations, otherwise stop after max_iter if nmad_before - nmad_after < 0.1: break nmad_before = nmad_after return ~dh_stable.mask
74275be5223531bc7cfc3f788cb562104514996c
3,647,337
def setup_output_vcf(outname, t_vcf):
    """
    Create an output vcf.Writer given the input vcf file as a templte
    writes the full header and Adds info fields: sizeCat MEF
    Returns a file handler and a dict with {individual_id: column in vcf}
    """
    out = open(outname, 'w')
    # Copy header lines verbatim until the column-header (#CHROM) line.
    line = t_vcf.readline()
    while not line.startswith("#CHROM"):
        out.write(line)
        line = t_vcf.readline()
    # Inject the two new INFO definitions just before the #CHROM line.
    out.write('##INFO=<ID=sizeCat,Number=A,Type=String,Description="Size category of variant">\n')
    out.write('##INFO=<ID=MEF,Number=.,Type=String,Description="Names of families that contain mendelian error">\n')
    out.write(line)
    # Sample columns start at index 9 of the #CHROM line.
    samp_columns = {
        iid: offset + 9
        for offset, iid in enumerate(line.strip().split('\t')[9:])
    }
    return out, samp_columns
82870c9c8d46dbe3161c434a87fac9108ed644b2
3,647,338
def validate_processing_hooks():
    """Validate the enabled processing hooks.

    Walks the hooks in their configured order and verifies that every
    hook's declared ``dependencies`` appear *earlier* in the enabled list.

    :raises: MissingHookError on missing or failed to load hooks
    :raises: RuntimeError on validation failure
    :returns: the list of hooks passed validation
    """
    # Loading the manager may itself raise MissingHookError.
    hooks = [ext for ext in processing_hooks_manager()]
    enabled = set()
    errors = []
    for hook in hooks:
        deps = getattr(hook.obj, 'dependencies', ())
        # A dependency is satisfied only if it was seen before this hook.
        missing = [d for d in deps if d not in enabled]
        if missing:
            errors.append('Hook %(hook)s requires the following hooks to be '
                          'enabled before it: %(deps)s. The following hooks '
                          'are missing: %(missing)s.' %
                          {'hook': hook.name,
                           'deps': ', '.join(deps),
                           'missing': ', '.join(missing)})
        enabled.add(hook.name)
    if errors:
        raise RuntimeError("Some hooks failed to load due to dependency "
                           "problems:\n%s" % "\n".join(errors))
    return hooks
2d76fb003a3e960c86e342f057ee5a9ea0a77def
3,647,340
def rnn_stability_loss(rnn_output, beta):
    """
    REGULARIZING RNNS BY STABILIZING ACTIVATIONS
    https://arxiv.org/pdf/1511.08400.pdf

    Penalizes changes in the L2 norm of the hidden state between
    consecutive timesteps.

    :param rnn_output: [time, batch, features]
    :param beta: penalty weight; 0.0 disables the loss entirely
    :return: loss value (scalar tensor, or the plain float 0.0 when
        beta == 0.0)
    """
    if beta == 0.0:
        return 0.0
    # [time, batch, features] -> [time, batch]
    l2 = tf.sqrt(tf.reduce_sum(tf.square(rnn_output), axis=-1))
    # [time, batch] -> []
    # Mean squared difference of consecutive per-timestep norms.
    return beta * tf.reduce_mean(tf.square(l2[1:] - l2[:-1]))
55dcc6461c7dcc683ae5a3b2d642ed9172616c8b
3,647,341
def xproto_fields(m, table):
    """ Generate the full list of models for the xproto message `m` including fields from the classes it inherits.

        Inserts the special field "id" at the very beginning.

        Each time we descend a new level of inheritance, increment the offset field numbers by 100. The base class's
        fields will be numbered from 1-99, the first descendant will be number 100-199, the second descdendant
        numbered from 200-299, and so on. This assumes any particular model as at most 100 fields.
    """
    # Copy each field dict so the numbering rewrite below never mutates the
    # caller's data.
    model_fields = [x.copy() for x in m["fields"]]
    for field in model_fields:
        field["accessor"] = m["fqn"]

    fields = xproto_base_fields(m, table) + model_fields

    # The "id" field is a special field. Every model has one. Put it up front and pretend it's part of the
    # base-most model.
    if not fields:
        raise Exception(
            "Model %s has no fields. Check for missing base class." % m["name"]
        )
    id_field = {
        "type": "int32",
        "name": "id",
        "options": {},
        "id": "1",
        "accessor": fields[0]["accessor"],
    }

    fields = [id_field] + fields

    # Walk through the list of fields. They will be in depth-first search order from the base model forward. Each time
    # the model changes, offset the protobuf field numbers by 100.
    offset = 0
    last_accessor = fields[0]["accessor"]
    for field in fields:
        if field["accessor"] != last_accessor:
            last_accessor = field["accessor"]
            offset += 100
        field_id = int(field["id"])
        if (field_id < 1) or (field_id >= 100):
            raise Exception(
                "Only field numbers from 1 to 99 are permitted, field %s in model %s"
                % (field["name"], field["accessor"])
            )
        field["id"] = int(field["id"]) + offset

    # Check for duplicates
    fields_by_number = {}
    for field in fields:
        id = field["id"]
        dup = fields_by_number.get(id)
        if dup:
            raise Exception(
                "Field %s has duplicate number %d with field %s in model %s"
                % (field["name"], id, dup["name"], field["accessor"])
            )
        fields_by_number[id] = field

    return fields
fffcbb99cf7ff851dde77fee3f16046e0e71582d
3,647,343
def create_decomp_expand_fn(custom_decomps, dev, decomp_depth=10):
    """Creates a custom expansion function for a device that applies
    a set of specified custom decompositions.

    Args:
        custom_decomps (Dict[Union(str, qml.operation.Operation), Callable]): Custom decompositions
            to be applied by the device at runtime.
        dev (qml.Device): A quantum device.
        decomp_depth: The maximum depth of the expansion.

    Returns:
        Callable: A custom expansion function that a device can call to expand
        its tapes within a context manager that applies custom decompositions.

    **Example**

    Suppose we would like a custom expansion function that decomposes all CNOTs
    into CZs. We first define a decomposition function:

    .. code-block:: python

        def custom_cnot(wires):
            return [
                qml.Hadamard(wires=wires[1]),
                qml.CZ(wires=[wires[0], wires[1]]),
                qml.Hadamard(wires=wires[1])
            ]

    We then create the custom function (passing a device, in order to pick up any
    additional stopping criteria the expansion should have), and then register the
    result as a custom function of the device:

    >>> custom_decomps = {qml.CNOT : custom_cnot}
    >>> expand_fn = qml.transforms.create_decomp_expand_fn(custom_decomps, dev)
    >>> dev.custom_expand(expand_fn)
    """
    # Accept both string names and operation classes as decomposition keys.
    custom_op_names = [op if isinstance(op, str) else op.__name__ for op in custom_decomps.keys()]

    # Create a new expansion function; stop at things that do not have
    # custom decompositions, or that satisfy the regular device stopping criteria
    custom_fn = qml.transforms.create_expand_fn(
        decomp_depth,
        stop_at=qml.BooleanFn(lambda obj: obj.name not in custom_op_names),
        device=dev,
    )

    # Finally, we set the device's custom_expand_fn to a new one that
    # runs in a context where the decompositions have been replaced.
    def custom_decomp_expand(self, circuit, max_expansion=decomp_depth):
        with _custom_decomp_context(custom_decomps):
            return custom_fn(circuit, max_expansion=max_expansion)

    return custom_decomp_expand
f7a4682bae3b520dcce87e715834439311e7d8b6
3,647,344
from typing import Dict
from typing import List

def get_lats_map(floor: float, ceil: float) -> Dict[int, List[float]]:
    """ Get map of full-minute lats to their quarter-minute subdivisions.

    Series considers lat range is [-70;69] and how objects are stored in s3.

    Keys are the full minutes covering [floor, ceil]; each value lists the
    quarter offsets (OBJ_COORD_PARTS) added to that key, clipped so the two
    boundary entries stay within [floor, ceil].
    """
    full = full_minutes([floor, ceil])
    # Interior keys get every quarter offset.
    out = {d: [d + dd for dd in OBJ_COORD_PARTS] for d in full[1:-1]}
    # Boundary keys are clipped to the requested range.
    start = full[0]
    out[start] = [start + d for d in OBJ_COORD_PARTS if start + d >= floor]
    end = full[-1]
    out[end] = [end + d for d in OBJ_COORD_PARTS if end + d <= ceil]
    # Objects only exist for keys in [-70, 70); drop everything else.
    return {k: d for k, d in out.items() if -70 <= k < 70}
e2bc82e509372d0ed20483a7ac97c5bbfbc9e2d5
3,647,345
from typing import Dict
from typing import Any

def format_dict(body: Dict[Any, Any]) -> str:
    """
    Formats a dictionary into a multi-line bulleted string of key-value pairs.
    """
    lines = []
    for key, val in body.items():
        # Unwrap enum-like objects via their .value attribute when present.
        display = getattr(val, 'value', val)
        lines.append(f" - {key} = {display}")
    return "\n".join(lines)
b3f66d086284772e6783b8281f4d46c3dd6c237d
3,647,346
def block3(x, filters, kernel_size=3, stride=1, groups=32, conv_shortcut=True, name=None):
    """A residual block (grouped convolution, ResNeXt style).

    # Arguments
        x: input tensor.
        filters: integer, filters of the bottleneck layer.
        kernel_size: default 3, kernel size of the bottleneck layer.
        stride: default 1, stride of the first layer.
        groups: default 32, group size for grouped convolution.
        conv_shortcut: default True, use convolution shortcut if True,
            otherwise identity shortcut.
        name: string, block label.

    # Returns
        Output tensor for the residual block.
    """
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    if conv_shortcut is True:
        shortcut = Conv2D((64 // groups) * filters, 1, strides=stride,
                          use_bias=False, name=name + '_0_conv')(x)
        shortcut = BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                      name=name + '_0_bn')(shortcut)
    else:
        shortcut = x

    x = Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x)
    x = Activation('relu', name=name + '_1_relu')(x)

    c = filters // groups
    x = ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)
    x = DepthwiseConv2D(kernel_size, strides=stride, depth_multiplier=c,
                        use_bias=False, name=name + '_2_conv')(x)
    # Bug fix: a stray `print` token preceded this assignment, making the
    # function a SyntaxError under Python 3; the debug print was removed.
    x_shape = backend.int_shape(x)[1:-1]
    x = Reshape(x_shape + (groups, c, c))(x)
    # Theano needs an explicit output shape for Lambda layers.
    output_shape = x_shape + (groups, c) if backend.backend() == 'theano' else None
    # Sum over the depth-multiplier axis to realise the grouped convolution.
    x = Lambda(lambda x: sum([x[:, :, :, :, i] for i in range(c)]),
               output_shape=output_shape, name=name + '_2_reduce')(x)
    x = Reshape(x_shape + (filters,))(x)
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(x)
    x = Activation('relu', name=name + '_2_relu')(x)

    x = Conv2D((64 // groups) * filters, 1, use_bias=False, name=name + '_3_conv')(x)
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_3_bn')(x)

    x = Add(name=name + '_add')([shortcut, x])
    x = Activation('relu', name=name + '_out')(x)
    return x
e04c2bd5662991203f5b3d86652d2469e06bd346
3,647,347
import time

def random_sleep(n):
    """Simulate a blocking io-bound task.

    Sleeps for a fixed 2 seconds -- note the argument does NOT control the
    duration -- and returns ``n`` unchanged.
    """
    time.sleep(2)
    return n
f626a2e64a266084f33a78dd9be4e4528ff88934
3,647,348
def LeastSquare_nonlinearFit_general(
    X,
    Y,
    func,
    func_derv,
    guess,
    weights=None,
    maxRelativeError=1.0e-5,
    maxIteratitions=100,
):
    """Computes a non-linear fit using the least square method following:
    http://ned.ipac.caltech.edu/level5/Stetson/Stetson2_2_1.html

    It takes the following arguments:
        X - array of x-parameters
        Y - y-array
        func - the function f(X,parameters)
        func_derv - a function that returns a 2D array giving along the columns
            the derivatives of the function 'f' with respect to each fit
            parameter ( df /dp_i )
        guess - a first guess for the fit parameters
        weights - the weights associated to each point
        maxRelativeError - stop the iteration once the error in each parameter
            is below this threshold
        maxIteratitions - stop the iteration after this many steps

    Returns: parameters, fit_error, chi_square, noPoints,
        succes (True if successful)
    """
    if weights is None:
        weights = np.ones_like(Y)
    noPoints = Y.size        # number of data points used for the fit
    noParams = len(guess)    # number of fit parameters

    # Bug fix: the original did `a += step` with `a = guess`; when guess is a
    # plain list, list += ndarray *extends* the list instead of adding
    # element-wise. Work on a float ndarray copy (also avoids mutating the
    # caller's guess in place).
    a = np.asarray(guess, dtype=np.float64).copy()

    # Iterate from the initial guess until the best-fit parameters converge.
    iteration, notConverged = 0, True
    while iteration < maxIteratitions and notConverged:
        tempX = func_derv(X, a)          # derivatives at the current parameters
        tempDiff = Y - func(X, a)        # residuals at the current parameters
        std = (weights * tempDiff ** 2).sum()  # current sum of the squares
        # rcond=None selects the modern lstsq cutoff (silences the
        # deprecation of the old default).
        step = np.linalg.lstsq(tempX, tempDiff, rcond=None)[0]
        while True:
            a2 = a + step
            tempStd = (weights * (Y - func(X, a2)) ** 2).sum()
            if tempStd > std:
                # Step overshoots (increases the residual); halve and retry.
                step /= 2.0
            else:
                a = a2
                break
        if (np.abs(step / a) < maxRelativeError).all():
            notConverged = False  # the iteration has converged
        iteration += 1
        # (Debug print of iteration/a/step/std removed.)

    # Compute the standard deviation for the best-fit parameters.
    derivatives = func_derv(X, a)
    M = np.zeros((noParams, noParams), np.float64)
    for i in range(noParams):
        for j in range(i, noParams):
            M[i, j] = (weights * derivatives[:, i] * derivatives[:, j]).sum()
            M[j, i] = M[i, j]
    Minv = np.linalg.inv(M)
    # Reduced chi-square (fit residuals per degree of freedom).
    chiSquare = (weights * (Y - func(X, a)) ** 2).sum() / (noPoints - noParams)
    a_error = np.sqrt(chiSquare * np.diag(Minv))
    return a, a_error, chiSquare, noPoints, iteration < maxIteratitions
255c5d11607aa13d1ab7d7ea28fbcd95396669e0
3,647,350
def _get_extracted_csv_table(relevant_subnets, tablename, input_path, sep=";"):
    """ Returns extracted csv data of the requested SimBench grid.

    relevant_subnets: subnet selection forwarded to the extraction helper.
    tablename: name of the csv table to read (e.g. "Switch", "Node").
    input_path: folder holding the csv data.
    sep: csv delimiter (default ";").
    """
    csv_table = read_csv_data(input_path, sep=sep, tablename=tablename)
    if tablename == "Switch":
        # Bus-bus switches need the Node table to be identified.
        node_table = read_csv_data(input_path, sep=sep, tablename="Node")
        bus_bus_switches = set(get_bus_bus_switch_indices_from_csv(csv_table, node_table))
    else:
        # Bug fix: `{}` is an empty *dict*; use an empty set so the type is
        # consistent with the Switch branch above.
        bus_bus_switches = set()
    extracted_csv_table = _extract_csv_table_by_subnet(
        csv_table, tablename, relevant_subnets, bus_bus_switches=bus_bus_switches)
    return extracted_csv_table
81368b79bb737c24d4f614ebd8aff76ff9efcac0
3,647,351
def has_prefix(sub_s):
    """Report whether any dictionary word starts with the given prefix.

    :param sub_s: (str) A substring built from neighboring letters on a
        4x4 square grid.
    :return: (bool) True if at least one word in the loaded vocabulary
        begins with ``sub_s``, else False.
    """
    # Short-circuits on the first matching vocabulary entry.
    return any(entry.strip().startswith(sub_s) for entry in vocabulary_list)
4a99a3437a954173a7f4fe87ccd8970a59632aa8
3,647,352
def work_out_entity(context, entity):
    """One of Arkestra's core functions"""
    # NOTE: the incoming ``entity`` argument is ignored -- the value is always
    # re-derived from the template context.
    found = context.get('entity', None)
    if found:
        return found
    # No entity in the context; try the CMS page attached to the request.
    request = context['request']
    if request.current_page:
        return entity_for_page(request.current_page)
    # We must be in a plugin, either in the page or in admin.
    page = context['plugin'].get("page", None)
    return entity_for_page(page) if page else None
8cc1039d8611aa03d8e2d1f708339d87845d0381
3,647,353
def rectify(link: str, parent: str, path: str):
    """Normalize a scraped link, rejecting links that should not be captured.

    External URLs (anything containing ``:``), in-page fragments (``#...``)
    and parent-relative links (``../``) are dropped in favour of the current
    page path. Relative links are resolved against the parent page.

    Args:
        link (str): the link to rectify.
        parent (str): full URL of the page the link was found on.
        path (str): path (after the domain) of the page the link was found on.

    Returns:
        str: the properly formatted link.
    """
    # Links we refuse to follow: fragments, schemes/ports, parent traversal.
    if link.startswith("#") or ":" in link or "../" in link:
        return path
    # Already domain-absolute.
    if link.startswith("/"):
        return link
    # Relative link: pick the directory to resolve against.
    if parent.endswith("/"):
        base = path if path.endswith("/") else path + "/"
    else:
        base = "/".join(path.split("/")[:-1]) + "/"
    return base + link
6ca5771fcbbb35fe6d99bab65082d447299bb93a
3,647,354
def nested_pids_and_relations(app, db): """Fixture for a nested PIDs and the expected serialized relations.""" # Create some PIDs and connect them into different nested PID relations pids = {} for idx in range(1, 12): pid_value = str(idx) p = PersistentIdentifier.create('recid', pid_value, object_type='rec', status=PIDStatus.REGISTERED) pids[idx] = p VERSION = resolve_relation_type_config('version').id # 1 (Version) # / | \ # 2 3 4 PIDRelation.create(pids[1], pids[2], VERSION, 0) PIDRelation.create(pids[1], pids[3], VERSION, 1) PIDRelation.create(pids[1], pids[4], VERSION, 2) # Define the expected PID relation tree for of the PIDs expected_relations = {} expected_relations[4] = { u'relations': { 'version': [ {u'children': [{u'pid_type': u'recid', u'pid_value': u'2'}, {u'pid_type': u'recid', u'pid_value': u'3'}, {u'pid_type': u'recid', u'pid_value': u'4'}], u'index': 2, u'is_child': True, u'previous': {'pid_type': 'recid', 'pid_value': '3'}, u'next': None, u'is_last': True, u'is_parent': False, u'parent': {u'pid_type': u'recid', u'pid_value': u'1'}, u'type': 'version' } ], } } return pids, expected_relations
b8039318ffa96f5f42cc7398398b761febf06349
3,647,355
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    # Expand tabs first so tab-indented lines are measured in spaces too.
    expanded = line.expandtabs()
    stripped = expanded.lstrip()
    return len(expanded) - len(stripped)
c1f307adfeb2c1ec51c5e926a0b87dd3841e1aff
3,647,356
import warnings
def read_idl_catalog(filename_sav, expand_extended=True):
    """Read an FHD-readable IDL ``.sav`` catalog into a SkyModel.

    Deprecated: use :meth:`SkyModel.read_fhd_catalog` instead.

    Parameters
    ----------
    filename_sav : str
        Path to the IDL ``.sav`` file.
    expand_extended : bool
        When True (default), extended source components are returned.

    Returns
    -------
    :class:`pyradiosky.SkyModel`
    """
    warnings.warn(
        "This function is deprecated, use `SkyModel.read_fhd_catalog` instead. "
        "This function will be removed in version 0.2.0.",
        category=DeprecationWarning,
    )
    sky = SkyModel()
    sky.read_fhd_catalog(filename_sav, expand_extended=expand_extended)
    return sky
77680e111c9f67b628cd1da5eac26afcb036374b
3,647,360
import struct
def guid2bytes(s):
    """Serialize a textual GUID into its 16-byte mixed-endian representation.

    The first three groups are packed little-endian, the remaining two
    big-endian (the final 6-byte group is packed as a Q with the top two
    bytes dropped).
    """
    assert isinstance(s, str)
    assert len(s) == 36
    head = struct.pack("<IHH", int(s[:8], 16), int(s[9:13], 16), int(s[14:18], 16))
    mid = struct.pack(">H", int(s[19:23], 16))
    tail = struct.pack(">Q", int(s[24:], 16))[2:]
    return head + mid + tail
f298497173f9011392b671267cb47f081d25a9da
3,647,361
import functools
def config(config_class=None, name="config", group=None):
    """
    Class decorator that registers a custom configuration class with `Hydra's ConfigStore API
    <https://hydra.cc/docs/tutorials/structured_config/config_store>`_.

    If defining your own custom configuration class, your class must do the following:

    * Register with `Hydra's ConfigStore API
      <https://hydra.cc/docs/tutorials/structured_config/config_store>`_ (which this decorator
      does for you).
    * Register as a `@dataclass <https://docs.python.org/3/library/dataclasses.html>`_.

    Example::

        @config(name="db")
        @dataclass
        class DBConfig(BaseConfig):
            host: str = "localhost"

    .. note:: Make sure @dataclass comes after @config.

    This also supports `Hydra Config Groups
    <https://hydra.cc/docs/tutorials/structured_config/config_groups>`_, example::

        @config
        @dataclass
        class Config(BaseConfig):
            db: Any = MISSING

        @config(name="mysql", group="db")
        @dataclass
        class MySqlConfig:
            host: str = "mysql://localhost"

        @config(name="postgres", group="db")
        @dataclass
        class PostgresConfig:
            host: str = "postgres://localhost"
            postgres_specific_data: str = "some special data"

    Then when running the job you can do the following::

        $ python3 -m project_package db=mysql

    :param name: Name of the configuration, used to locate overrides.
    :type name: str
    :param group: Group name to support Hydra Config Groups.
    :type group: str
    """
    @functools.wraps(config_class)
    def wrapper(config_class, name, group):
        # Register the class as a structured-config node in Hydra's store.
        cs = ConfigStore.instance()
        cs.store(name=name, node=config_class, group=group)
        return config_class

    # Bare usage: ``@config`` -- the class is already in hand, register now.
    if config_class:
        return wrapper(config_class, name, group)

    # Parameterized usage: ``@config(name=..., group=...)`` -- return a
    # decorator that re-enters config() once the class is supplied.
    def recursive_wrapper(config_class):
        return config(config_class, name, group)

    return recursive_wrapper
6e3828c51835f8a9a9ed3afc4b5d9606d8ba9e50
3,647,362
def truncate_words(text, max_chars, break_words=False, padding=0):
    """Truncate a string to at most ``max_chars - padding`` characters.

    Args:
        text: the string to truncate.
        max_chars: maximum allowed length.
        break_words: when True, cut mid-word at the character budget;
            when False (default), keep only whole words that fit.
        padding: extra characters to reserve out of the budget.

    Returns:
        The truncated string (whole words joined by single spaces when
        ``break_words`` is False).
    """
    if break_words:
        # Bug fix: the old slice ``text[:-abs(max_chars - len(text)) - padding]``
        # also chopped characters off strings that already fit, and returned ""
        # for a string of exactly max_chars with padding=0.
        if len(text) + padding <= max_chars:
            return text
        return text[:max(0, max_chars - padding)]
    words = []
    for word in text.split():
        # Prospective length: existing words + this word + joining spaces + padding.
        length = sum(map(len, words)) + len(word) + len(words) - 1 + padding
        if length >= max_chars:
            break
        words.append(word)
    return ' '.join(words)
b239822fd1cef6c2e3f7425a0ad5cbc32b8b1325
3,647,363
def interpolate_mean_tpr(FPRs=None, TPRs=None, df_list=None):
    """
    mean_fpr, mean_tpr = interpolate_mean_tpr(FPRs=None, TPRs=None, df_list=None)

    Interpolate each ROC curve onto a common FPR grid and average them.

    FPRs: False positive rates (list of n arrays)
    TPRs: True positive rates (list of n arrays)
    df_list: DataFrames with TPR, FPR columns (list of n DataFrames)

    Returns (mean_fpr, mean_tpr) with an origin point (0, 0) prepended,
    or (None, None) when no valid input is given.
    """
    # Seed empty accumulator and the common FPR grid.
    mean_tpr, mean_fpr = 0, np.linspace(0, 1, 101)
    # Bug fix: the old code read the loop variable ``idx`` after the loop,
    # which raised NameError for empty input lists; count curves explicitly.
    if TPRs and FPRs:
        curves = list(zip(FPRs, TPRs))
        for fpr, tpr in curves:
            mean_tpr += np.interp(mean_fpr, fpr, tpr)
    elif df_list:
        curves = df_list
        for df_ in df_list:
            mean_tpr += np.interp(mean_fpr, df_.FPR, df_.TPR)
    else:
        print("Please give valid inputs.")
        return None, None
    # Normalize by the number of curves averaged.
    mean_tpr /= len(curves)
    # Add origin point.
    mean_fpr = np.insert(mean_fpr, 0, 0)
    mean_tpr = np.insert(mean_tpr, 0, 0)
    return mean_fpr, mean_tpr
47f53c0f5010620bf54119a0f35eff13143fa960
3,647,364
def _generate_copy_from_codegen_rule(plugin, target_name, thrift_src, file):
    """Generates a rule that copies a generated file from the plugin codegen
    output directory out into its own target.

    Returns the name of the generated rule.
    """
    # The codegen rule whose output directory we copy from.
    invoke_codegen_rule_name = _invoke_codegen_rule_name(
        plugin,
        target_name,
        thrift_src,
    )
    # Generated files live under gen-cpp2-<plugin-name>/ in the output.
    plugin_path_prefix = "gen-cpp2-{}".format(plugin.name)
    rule_name = _copy_from_codegen_rule_name(plugin, target_name, thrift_src, file)
    # mkdir first: genrule $OUT may be a nested path that does not exist yet.
    cmd = " && ".join(
        [
            "mkdir `dirname $OUT`",
            "cp $(location :{})/{} $OUT".format(invoke_codegen_rule_name, file),
        ],
    )
    fb_native.genrule(
        name = rule_name,
        out = "{}/{}".format(plugin_path_prefix, file),
        cmd = cmd,
    )
    return rule_name
652fad4701299d031fd6258ce4d2d8b097af3406
3,647,365
def mir_right(data):
    """Prepend a left-right mirrored copy of ``data`` along the last axis.

    The result is the mirror image followed by the original, concatenated
    on axis -1 (output is twice as wide as the input).
    """
    mirrored = data[..., ::-1]
    return np.append(mirrored, data, axis=-1)
7daa6b4c70e80fcbeda894b7ff358b44149edf22
3,647,366
def mask_nms(masks, bbox_scores, instances_confidence_threshold=0.5, overlap_threshold=0.7):
    """
    NMS-like procedure used in Panoptic Segmentation
    Remove the overlap areas of different instances in Instance Segmentation

    masks: (H, W, N) stack of per-instance binary masks -- assumed sorted by
        descending score along with bbox_scores (TODO confirm at call site).
    bbox_scores: per-instance confidence scores.
    Returns (panoptic_seg, segments_score): an (H, W) uint8 map of segment ids
    (0 = background) and the scores of the kept segments.
    """
    panoptic_seg = np.zeros(masks.shape[:2], dtype=np.uint8)
    sorted_inds = list(range(len(bbox_scores)))
    current_segment_id = 0
    segments_score = []
    for inst_id in sorted_inds:
        score = bbox_scores[inst_id]
        # Scores are processed in order; once one falls below the threshold
        # all later (lower) ones are skipped too.
        if score < instances_confidence_threshold:
            break
        mask = masks[:, :, inst_id]
        mask_area = mask.sum()
        if mask_area == 0:
            continue
        # Fraction of this mask already claimed by higher-ranked instances.
        intersect = (mask > 0) & (panoptic_seg > 0)
        intersect_area = intersect.sum()
        if intersect_area * 1.0 / mask_area > overlap_threshold:
            continue
        if intersect_area > 0:
            # Keep only the not-yet-claimed part of the mask.
            mask = mask & (panoptic_seg == 0)
        current_segment_id += 1
        # panoptic_seg[np.where(mask==1)] = current_segment_id
        # panoptic_seg = panoptic_seg + current_segment_id*mask
        panoptic_seg = np.where(mask == 0, panoptic_seg, current_segment_id)
        segments_score.append(score)
    # print(np.unique(panoptic_seg))
    return panoptic_seg, segments_score
92465752e741fffb04ea1a243dd465799daf6598
3,647,367
def table_to_lookup(table):
    """Convert the contents of a DynamoDB table to a route-lookup dictionary.

    Downloads every item from ``table`` via dump_table, then builds a dict
    keyed by ``(route_id, local_express_code)`` whose values hold the average
    speed (float) and the list of historic speeds (floats). Items without an
    ``avg_speed_m_s`` attribute are skipped.

    Args:
        table: A boto3 Table object to read in full.

    Returns:
        dict mapping (route id, local express code) to
        {'avg_speed_m_s': float, 'historic_speeds': list[float]}.
    """
    lookup = {}
    for record in dump_table(table):
        if 'avg_speed_m_s' not in record:
            continue
        key = (int(record['route_id']), record['local_express_code'])
        lookup[key] = {
            'avg_speed_m_s': float(record['avg_speed_m_s']),
            'historic_speeds': [float(s) for s in record['historic_speeds']],
        }
    return lookup
97a95a2e06e81907d2d1091e2ebf41a26808653a
3,647,368
import time
def sh(cmd, stdin="", sleep=False):
    """run a command, send stdin and capture stdout and exit status

    Args:
        cmd: shell command line (executed with shell=True).
        stdin: text fed to the process's standard input.
        sleep: when True, pause 0.5s before launching (settle time).

    Returns:
        (returncode, stdout) with stdout decoded and stripped.
    """
    if sleep:
        time.sleep(0.5)
    process = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE)
    # Bug fix: feed stdin through communicate() instead of a manual
    # stdin.write() followed by communicate() -- the old sequence could
    # deadlock when the input exceeded the pipe buffer.
    stdout = process.communicate(bytes(stdin, "utf-8"))[0].decode('utf-8').strip()
    return process.returncode, stdout
22db066b2e4b429b1756bf00d4592359c79939cd
3,647,370
def array_to_slide(arr: np.ndarray) -> openslide.OpenSlide:
    """Wrap a numpy image array in an openslide-compatible slide object.

    Args:
        arr (np.ndarray): input image array.

    Returns:
        openslide.OpenSlide: an ImageSlide built from the array.
    """
    assert isinstance(arr, np.ndarray)
    pil_image = Image.fromarray(arr)
    return openslide.ImageSlide(pil_image)
83a5d2def7a0626a24396b933bd984a6de7ae634
3,647,371
from typing import Sequence
import copy
def baseline_replacement_by_blur(arr: np.array,
                                 patch_slice: Sequence,
                                 blur_kernel_size: int = 15,
                                 **kwargs) -> np.array:
    """
    Replace a single patch in an array by a blurred version.
    Blur is performed via a 2D convolution.
    blur_kernel_size controls the kernel-size of that convolution (Default is 15).
    Assumes unbatched channel first format.
    """
    nr_channels = arr.shape[0]

    # Create blurred array: a normalized box (mean) kernel applied per channel
    # (grouped convolution), spatial dims only (leading 1 keeps channels intact).
    blur_kernel_size = (1, *([blur_kernel_size] * (arr.ndim - 1)))
    kernel = np.ones(blur_kernel_size, dtype=arr.dtype)
    kernel *= 1.0 / np.prod(blur_kernel_size)
    kernel = np.tile(kernel, (nr_channels, 1, *([1] * (arr.ndim - 1))))

    if arr.ndim == 3:
        # conv2D_numpy is a project helper -- assumed to implement grouped 2D
        # convolution with same-size padded output (TODO confirm signature).
        arr_avg = conv2D_numpy(
            x=arr,
            kernel=kernel,
            stride=1,
            padding=0,
            groups=nr_channels,
            pad_output=True,
        )
    elif arr.ndim == 2:
        raise NotImplementedError("1d support not implemented yet")
    else:
        raise ValueError("Blur supports only 2d inputs")

    # Perturb array: copy the input and splice the blurred patch in.
    # NOTE(review): copy.copy is shallow; the write below still mutates the
    # shared buffer of ``arr`` -- confirm a deep/np copy is not required.
    arr_perturbed = copy.copy(arr)
    arr_perturbed[patch_slice] = arr_avg[patch_slice]

    return arr_perturbed
5e5f25eeff3caebbdd8c9612fb26d23a11e3f80e
3,647,372
def admit_dir(file):
    """create the admit directory name from a filename

    This filename can be a FITS file (usually with a .fits extension
    or a directory, which would be assumed to be a CASA image or
    a MIRIAD image.  It can be an absolute or relative address.

    Returns the input name with its extension replaced by '.admit'
    (or with '.admit' appended when there is no extension).
    """
    loc = file.rfind('.')
    ext = '.admit'
    if loc < 0:
        # No extension at all: just append.
        return file + ext
    if file[loc:] == ext:
        # Fixed: this was a Python-2 print statement, which is a syntax
        # error under Python 3.
        print("Warning: assuming a re-run on existing", file)
        return file
    return file[:loc] + ext
16d4b32b3994a3a260556b984e86e6507cad581b
3,647,373
def grad(values: list[int], /) -> list[int]:
    """Compute the gradient of a sequence of values.

    Returns the list of consecutive differences; an input with fewer than
    two elements yields an empty list.
    """
    return [values[i + 1] - values[i] for i in range(len(values) - 1)]
a1df1dffb27028dc408b00ec8ac26b6f68d9c923
3,647,374
from typing import Union
def subtract(
    num1: Union[int, float], num2: Union[int, float], *args
) -> Union[int, float]:
    """Subtract ``num2`` and every extra argument from ``num1``."""
    result: Union[int, float] = num1 - num2
    for extra in args:
        result -= extra
    return result
55db772fdcdc9aa24fd61069a3adcd6bc7abe468
3,647,375
import pkgutil
import importlib
def get_submodules(module):
    """ Attempts to find all submodules of a given module object """
    # Modules whose import has side effects or breaks; always skipped.
    _skip = [
        "numpy.f2py",
        "numpy.f2py.__main__",
        "numpy.testing.print_coercion_tables",
    ]
    try:
        # Packages expose __path__; walk it.
        path = module.__path__
    except Exception:
        # Plain module: fall back to its file location.
        # (getfile is presumably inspect.getfile -- imported elsewhere in the file.)
        path = [getfile(module)]
    # _name is a module-level helper returning the dotted module name.
    modules = {_name(module): module}
    for importer, modname, ispkg in pkgutil.walk_packages(
        path=path,
        prefix=_name(module) + ".",
        onerror=lambda x: None,  # swallow walk errors silently
    ):
        # Some known packages cause issues
        if modname in _skip:
            continue
        try:
            modules[modname] = importlib.import_module(modname)
        except Exception:
            # Best-effort: unimportable submodules are simply omitted.
            pass

    return modules
214b2aa0551e59a11bd853791ec64e9a238fd155
3,647,376
import inspect
import hashlib
def ucr_context_cache(vary_on=()):
    """
    Decorator which caches calculations performed during a UCR EvaluationContext

    The decorated function or method must have a parameter called 'context'
    which will be used by this decorator to store the cache.

    ``vary_on`` is a tuple of argument names whose values become part of
    the cache key.
    """
    def decorator(fn):
        # The wrapped function must take a 'context' argument to cache into.
        assert 'context' in fn.__code__.co_varnames
        assert isinstance(vary_on, tuple)

        @wraps(fn)
        def _inner(*args, **kwargs):
            # shamelessly stolen from quickcache
            callargs = inspect.getcallargs(fn, *args, **kwargs)
            context = callargs['context']
            # Key prefix: truncated function name + hash of its source, so the
            # cache self-invalidates when the function body changes.
            prefix = '{}.{}'.format(
                fn.__name__[:40] + (fn.__name__[40:] and '..'),
                hashlib.md5(inspect.getsource(fn).encode('utf-8')).hexdigest()[-8:]
            )
            # Cache key = prefix + the values of the vary_on arguments
            # (assumed hashable -- TODO confirm for all call sites).
            cache_key = (prefix,) + tuple(callargs[arg_name] for arg_name in vary_on)
            if context.exists_in_cache(cache_key):
                return context.get_cache_value(cache_key)
            res = fn(*args, **kwargs)
            context.set_cache_value(cache_key, res)
            return res
        return _inner
    return decorator
85c6b511761eddb74c8a856f9d2c2f1029cba8b2
3,647,377
def extract_value(item, key):
    """Look up ``key`` inside ``item`` and return its sanitized text.

    Returns an empty string when the element carries no text value.
    """
    raw = item.find(key).text
    if raw is None:
        return ""
    return sanitize_value(raw)
228115b77b0298bb808dcbb0dbd89a0938f0feef
3,647,378
def create_train_test_split(data: pd.DataFrame, split_size: float = .8, seed: int = 42) -> list: """ takes the final data set and splits it into random train and test subsets. Returns a list containing train-test split of inputs Args: data: dataset to be split into train/test split_size: the size of the train dataset (default .8) seed: pass an int for reproducibility purposes Returns: A list containing train-test split of inputs """ # assert split size between 0 and 1 assert 0 <= split_size <= 1, "split_size out of bounds" assert isinstance(data, pd.DataFrame), "no DataFrame provided" assert isinstance(seed, int), "provided seed is no integer" # split into features and target # features = data.drop('target', axis=1) # target = data['target'] # stratify by the target to ensure equal distribution return train_test_split(data, train_size=split_size, random_state=seed, shuffle=True)
49a5a07e5f232106d502eaf2f9d9021644f37e27
3,647,379
def get_vec(text, model, stopwords):
    """Vectorize a series of sentences with a fastText model.

    Each entry of ``text`` is turned into a sentence vector via
    ``sent2vec``; the vectors are stacked into a single numpy array.
    """
    vectors = [sent2vec(sentence, model, stopwords) for sentence in text]
    return np.array(vectors)
9c28cf8cd2c6acea3f81a5290d6f6c9fd027a6ca
3,647,380
from typing import Tuple
import math
def rytz_axis_construction(d1: Vector, d2: Vector) -> Tuple[Vector, Vector, float]:
    """
    The Rytz’s axis construction is a basic method of descriptive Geometry
    to find the axes, the semi-major axis and semi-minor axis, starting from two
    conjugated half-diameters.

    Source: `Wikipedia <https://en.m.wikipedia.org/wiki/Rytz%27s_construction>`_

    Given conjugated diameter `d1` is the vector from center C to point P and
    the given conjugated diameter `d2` is the vector from center C to point Q.
    Center of ellipse is always ``(0, 0, 0)``.

    This algorithm works for 2D/3D vectors.

    Args:
        d1: conjugated semi-major axis as :class:`Vector`
        d2: conjugated semi-minor axis as :class:`Vector`

    Returns:
         Tuple of (major axis, minor axis, ratio)

    Raises:
        ArithmeticError: if the input is degenerate (not truly conjugated axes).
    """
    Q = Vector(d1)  # vector CQ
    # Rotate d2 by 90° (clockwise) to get P'.
    P1 = Vector(d2).orthogonal(ccw=False)  # vector CP', location P'
    D = P1.lerp(Q)  # vector CD, location D, midpoint of P'Q
    radius = D.magnitude
    radius_vector = (Q - P1).normalize(radius)  # direction vector P'Q
    # A and B are the intersections of the circle around D (through C)
    # with the line P'Q; they point along the ellipse axes.
    A = D - radius_vector  # vector CA, location A
    B = D + radius_vector  # vector CB, location B
    if A.isclose(NULLVEC) or B.isclose(NULLVEC):
        raise ArithmeticError('Conjugated axis required, invalid source data.')
    major_axis_length = (A - Q).magnitude
    minor_axis_length = (B - Q).magnitude
    if math.isclose(major_axis_length, 0.) or math.isclose(minor_axis_length, 0.):
        raise ArithmeticError('Conjugated axis required, invalid source data.')
    ratio = minor_axis_length / major_axis_length
    major_axis = B.normalize(major_axis_length)
    minor_axis = A.normalize(minor_axis_length)
    return major_axis, minor_axis, ratio
14fec51fa1591e74553e6918187dfb850ae4b6f0
3,647,381
def _join_ljust(words, width=9): """join list of str to fixed width, left just""" return ' '.join(map(lambda s: s.ljust(width), words)).strip()
6096f5be960fb0ae2fe942c9b55924802094db16
3,647,382
def construct_dictionaries(color, marker, size, scatter_ecolor='k', alpha=1.0,
                           fill_scatter=False, elinewidth=1, capsize=0):
    """Build the keyword-argument dictionaries used by the abundance plots.

    Example usage:
      halo_kws = construct_dictionaries('k','o', 20, alpha=.3)
      pltabund.plot_XFe_XFe(ax, 'K', 'Mg', roed, plot_xlimit=True,
                            plot_ylimit=True, label="Halo", **halo_kws)
    """
    arrow_kws = {'color': color, 'head_length': 0.15, 'head_width': 0.05}
    # Filled markers use the face color directly; open markers get an outline.
    if fill_scatter:
        scatter_kws = {'marker': marker, 's': size, 'facecolor': color,
                       'alpha': alpha}
    else:
        scatter_kws = {'marker': marker, 's': size, 'facecolor': 'none',
                       'linewidths': 1, 'edgecolors': color, 'alpha': alpha}
    return {
        'color': color,
        'edgecolors': scatter_ecolor,
        'marker': marker,
        's': size,
        'alpha': alpha,
        'e_kws': {'ecolor': color, 'elinewidth': elinewidth, 'capsize': capsize},
        'ulkws': {'arrow_length': 0.25,
                  'scatter_kws': scatter_kws,
                  'arrow_kws': arrow_kws},
    }
6741dd1ffacc3a10953f86ccdaf53d1d3504a77c
3,647,383
def arrows(m) -> str:
    """One or more arrows separate by a space"""
    # Capture rule (presumably Talon): ``m.arrow_list`` is already the joined
    # string produced by the sub-capture -- TODO confirm against the grammar.
    return m.arrow_list
3fb43d9d753f667148bb9bb18eb026f7385d6264
3,647,385
def hex2int(hex_str):
    """Convert a hexadecimal string (e.g. "23") to its integer value (35).

    :param hex_str: hex character string
    :return: int
    """
    return int(hex_str, base=16)
0640cffd6f7558f4dfd1bc74e20510e7d2051ca3
3,647,386
def lat_2_R(lat):
    """Distance from the center of an ellipsoidal Earth to its surface.

    Arguments:
        lat (float): Geodetic Latitude angle [degrees].

    Returns:
        (float): Geocentric distance [km]
    """
    R_polar = 6356.752314245
    R_eq = 6378.137
    # Convert to radians, then evaluate the standard geocentric-radius
    # formula for an oblate spheroid.
    phi = lat / 180 * pi
    numerator = (R_eq ** 2 * cos(phi)) ** 2 + (R_polar ** 2 * sin(phi)) ** 2
    denominator = (R_eq * cos(phi)) ** 2 + (R_polar * sin(phi)) ** 2
    return sqrt(numerator / denominator)
aa4f24ccb8a82bad1e6d9fc49f7e2984874ee78b
3,647,387
def compare_records(old_list, new_list):
    """Compare two lists of SeqRecord objects.

    Both inputs must be lists of equal length; returns True when every
    paired record compares equal via compare_record, stopping at the
    first mismatch.
    """
    assert isinstance(old_list, list)
    assert isinstance(new_list, list)
    assert len(old_list) == len(new_list)
    return all(compare_record(old, new) for old, new in zip(old_list, new_list))
6b70115f9db52898849fa065eff6ed827eab1e16
3,647,388
def embed_owners_wallet(address, asset_name) -> Embed:
    """Return discord embed of wallet owner"""
    title = f"{asset_name} is owned by"
    description = f"`{address}`"
    color = Colour.blurple()
    embed = Embed(title=title, description=description, color=color)
    # Link to the owning wallet on pool.pm (POOL_PM_URL is a module constant).
    name = "This address belongs to wallet..."
    value = f"{POOL_PM_URL}/{address}/0e14267a"
    embed.add_field(name=name, value=value, inline=False)
    embed.set_footer(text=f"Data comes from {POOL_PM_URL}")
    return embed
016039acbdf264e5389d3e4f94153345226d2549
3,647,389
def Qdist_H_lm_jk_FGH(a, b, c , d, N_x, N_y, h, par, model):
    """
    Build the 2D Fourier-Grid-Hamiltonian (FGH) matrix for the time
    independent Schrodinger equation.

    Parameters
    ----------
    a : TYPE
        left end point of the interval in x coordinate chosen for solving the time independent schrodinger equation.
    b : TYPE
        right end point of the interval in x coordinate chosen for solving the time independent schrodinger equation.
    c : TYPE
        left end point of the interval in y coordinate chosen for solving the time independent schrodinger equation.
    d : TYPE
        right end point of the interval in y coordinate chosen for solving the time independent schrodinger equation.
    N_x : TYPE
        number of grid points in x coordinate, must be an odd integer value.
    N_y : TYPE
        number of grid points in y coordinate, must be an odd integer value.
    h : TYPE
        reduced planck's constant of the system.
    par : TYPE
        parameters of the potential energy function.
    model :
        potential model identifier, forwarded to potential_energy_2dof.

    Returns
    -------
    H_lm_jk : TYPE
        Hamiltonian matrix which is a discretisation of the Hamiltonian operator, computed using the FGH method.
    """
    # Grid spacings and the corresponding momentum-space spacings.
    dx = (b-a)/(N_x-1)
    dpx = 2*np.pi / (N_x-1) / dx
    dy = (d-c)/(N_y-1)
    dpy = 2*np.pi / (N_y-1) / dy
    H_lm_jk = np.zeros((N_x*N_y,N_x*N_y))
    # Double loop over the upper triangle of the (N_x*N_y)^2 matrix; the
    # lower triangle is filled by symmetry. Cost is O((N_x*N_y)^2).
    for i1 in range(N_x*N_y):
        #print(i1)
        for i2 in range(i1, N_x*N_y):
            # Decompose flat indices into (row, column) grid coordinates:
            # column index i2 -> (j, k), row index i1 -> (l, m).
            k = (i2 // N_x)
            j = (i2 % N_x)
            m = (i1 // N_x)
            l = (i1 % N_x)
            # Partial cosine sums entering the kinetic-energy matrix elements.
            sum_rhalf = sum(np.cos(2*np.pi*np.arange(1, int((N_x+1)/2))*(j-l)/(N_x-1)))
            sum_r = 2 * sum_rhalf + 1
            sum_shalf = sum(np.cos(2*np.pi*np.arange(1, int((N_y+1)/2))*(k-m)/(N_y-1)))
            sum_s = 2 * sum_shalf + 1
            if j == l and k == m:
                # Diagonal element: kinetic terms in x and y ...
                H_lm_jk[m*N_x+l,k*N_x+j] = H_lm_jk[m*N_x+l,k*N_x+j] + 2 * 1/(N_x-1) /(N_y-1)* h**2 * sum((np.arange(1, int((N_x+1)/2))*dpx)**2 / 2 * np.cos(2*np.pi*np.arange(1, int((N_x+1)/2))*(j-l)/(N_x-1))) * sum_s \
                    + 2 * 1/(N_x-1) /(N_y-1)* h**2 * sum((np.arange(1, int((N_y+1)/2))*dpy)**2 / 2 * np.cos(2*np.pi*np.arange(1, int((N_y+1)/2))*(k-m)/(N_y-1))) * sum_r
                # ... plus the potential energy at this grid point.
                H_lm_jk[m*N_x+l,k*N_x+j] = H_lm_jk[m*N_x+l,k*N_x+j] + potential_energy_2dof((a+j*dx),(c+k*dy),par, model)
                H_lm_jk[k*N_x+j,m*N_x+l] = H_lm_jk[m*N_x+l,k*N_x+j]
            else:
                # Off-diagonal element: kinetic contribution only.
                H_lm_jk[m*N_x+l,k*N_x+j] = H_lm_jk[m*N_x+l,k*N_x+j] + 2 * 1/(N_x-1) /(N_y-1)* h**2 * sum((np.arange(1, int((N_x+1)/2))*dpx)**2 / 2 * np.cos(2*np.pi*np.arange(1, int((N_x+1)/2))*(j-l)/(N_x-1))) * sum_s \
                    + 2 * 1/(N_x-1) /(N_y-1)* h**2 * sum((np.arange(1, int((N_y+1)/2))*dpy)**2 / 2 * np.cos(2*np.pi*np.arange(1, int((N_y+1)/2))*(k-m)/(N_y-1))) * sum_r
                # Hermitian (real symmetric) fill of the mirrored entry.
                H_lm_jk[k*N_x+j,m*N_x+l] = H_lm_jk[m*N_x+l,k*N_x+j]
    return H_lm_jk
f67c369f5eadd273afa3a2af7cf9d9b274d7a994
3,647,390
from django.core.exceptions import ObjectDoesNotExist
from physical.models import Host
from backup.models import LogConfiguration
def get_log_configuration_retention_backup_log_days(host_id):
    """Return LOG_CONFIGURATION_RETENTION_BACKUP_LOG_DAYS

    Looks up the host, derives its databaseinfra from the first instance,
    and returns the retention_days of the matching LogConfiguration, or
    None when the host/infra lookup fails or no configuration exists.
    """
    try:
        host = Host.objects.get(id=host_id)
        # The infra is taken from the host's first instance; assumes every
        # host has at least one instance (IndexError is caught below).
        databaseinfra = host.instances.all()[0].databaseinfra
    except Exception as e:
        LOG.warn("Error on get_log_configuration_retention_backup_log_days. Host id: {} - error: {}".format(host_id, e))
        return None

    try:
        # Configuration is keyed by (environment, engine type).
        log_configuration = LogConfiguration.objects.get(environment=databaseinfra.environment,
                                                         engine_type=databaseinfra.engine.engine_type)
    except ObjectDoesNotExist:
        return None

    return log_configuration.retention_days
02ed8a863fd383c15eb25c37553a7b521a2b27fe
3,647,391
def createLabelsAndWeightsFromRois(image, roiimage):
    """Create class labels and instance labels from an ROI mask.

    Every pixel starts as class 1 (background); pixels belonging to any
    positive ROI value become class 2, and each distinct ROI value is
    assigned a consecutive instance id starting at 1.

    Args:
        image: input image (only its height/width are used)
        roiimage: input mask image
    Returns:
        (classlabelsdata, instancelabelsdata, total_instances) as flat
        arrays of length H*W plus the next unused instance id.
    """
    logger.info("Creating class and instance labels ...")
    H = image.shape[0]
    W = image.shape[1]
    logger.info("H, W = {},{}".format(H, W))

    class_labels = np.ones([H * W])
    instance_labels = np.zeros([H * W])

    flat_rois = roiimage.reshape(-1)
    total_instances = 1
    for value in np.unique(roiimage):
        # Zero/negative ROI values are background and get no instance id.
        if value <= 0:
            continue
        member = flat_rois == value
        class_labels[member] = 2
        instance_labels[member] = total_instances
        total_instances += 1
    return class_labels, instance_labels, total_instances
747a99c9a1e27666dfc5f6aa73394d86ee4e7a19
3,647,392
def dup_inner_refine_complex_root(f, x, y, dx, dy, F, K):
    """One bisection step of complex root refinement algorithm.

    The rectangle [x, x+dx] x [y, y+dy] (known to contain exactly one root
    of ``f``) is split into four quadrants; Sturm-sequence zero counting
    picks the quadrant that still contains the root. Returns the refined
    (x, y, dx, dy, sturm-sequences) tuple, or raises RefinementFailed.
    """
    hx, hy = dx/2, dy/2
    cx, cy = x + hx, y + hy

    F1, F2, F3, F4 = F

    # Sturm sequences along the vertical/horizontal lines through the center.
    Fx = _dup_inner_sturm(f, K.one, K.zero, cx, cy, K)
    Fy = _dup_inner_sturm(f, K.zero, K.one, cx, cy, K)

    # Quadrant #1: ++
    F11 = Fx
    F12 = _dup_sturm_shift(F2, hx, K)
    F13 = F3
    F14 = _dup_sturm_mirror(_dup_sturm_shift(Fy, hy, K), K)

    k1 = _dup_inner_zeros(F11, F12, F13, F14, hx, hy, K)

    if k1 == 1:
        return (cx, cy, hx, hy, (F11, F12, F13, F14))

    # Quadrant #2: -+
    F21 = _dup_sturm_shift(Fx,-hx, K)
    F22 = Fy
    F23 = _dup_sturm_shift(F3, hx, K)
    F24 = F4

    k2 = _dup_inner_zeros(F21, F22, F23, F24, hx, hy, K)

    if k2 == 1:
        return (x, cy, hx, hy, (F21, F22, F23, F24))

    # Quadrant #3: --
    F31 = F1
    F32 = _dup_sturm_shift(Fy,-hy, K)
    F33 = _dup_sturm_mirror(Fx, K)
    F34 = _dup_sturm_shift(F4, hy, K)

    k3 = _dup_inner_zeros(F31, F32, F33, F34, hx, hy, K)

    if k3 == 1:
        return (x, y, hx, hy, (F31, F32, F33, F34))

    # Quadrant #4: +-
    F41 = _dup_sturm_shift(F1, hx, K)
    F42 = F2
    F43 = _dup_sturm_mirror(_dup_sturm_shift(Fx, hx, K), K)
    F44 = _dup_sturm_mirror(Fy, K)

    k4 = _dup_inner_zeros(F41, F42, F43, F44, hx, hy, K)

    if k4 == 1:
        return (cx, y, hx, hy, (F41, F42, F43, F44))

    # No quadrant isolated the root: the precondition was violated.
    raise RefinementFailed("no roots in (%s, %s) x (%s, %s) rectangle" % (x, y, x+dx, y+dy))
c45e1deba1ad68fb9a85a2e46ca4e3c7eddb072d
3,647,393
def print_dataset_info(superclasses, subclass_splits, label_map, label_map_sub):
    """Summarize the superclasses/subclasses of a dataset in a DataFrame.

    Args:
        superclasses (list): WordNet IDs of superclasses (unused here).
        subclass_splits (tuple): (source, target) lists; each element is a
            list of subclass ids per superclass. An empty target list means
            there is no source/target split.
        label_map (dict): Map from (super)class number to superclass name.
        label_map_sub (dict): Map from subclass number to subclass name.

    Returns:
        pandas DataFrame with one row per superclass.
    """
    def fmt(class_idx):
        # "short name (id)" using only the first comma-separated synonym.
        return [f'{label_map_sub[r].split(",")[0]} ({r})' for r in class_idx]

    contains_split = len(subclass_splits[1])
    if contains_split:
        data = {'superclass': [], 'subclasses (source)': [], 'subclasses (target)': []}
    else:
        data = {'superclass': [], 'subclasses': []}

    for i, name in enumerate(label_map.values()):
        data['superclass'].append(f'{name}')
        if contains_split:
            data['subclasses (source)'].append(fmt(subclass_splits[0][i]))
            data['subclasses (target)'].append(fmt(subclass_splits[1][i]))
        else:
            data['subclasses'].append(fmt(subclass_splits[0][i]))
    return pd.DataFrame(data)
6885eecfaa38f609957b6ccfa594701f77ea1b30
3,647,394
def mock_sync_cavatica_account(mocker):
    """
    Mocks out sync Cavatica account functions
    """
    # Patch the real sync function and make it return three empty lists
    # (matching the real function's return shape -- presumably
    # created/updated/deleted projects; confirm against
    # creator.projects.cavatica).
    sync_cavatica_account = mocker.patch(
        "creator.projects.cavatica.sync_cavatica_account"
    )
    sync_cavatica_account.return_value = [], [], []
    return sync_cavatica_account
27a0a8abee2c025fe17ba4fa4a939bcf04fc9c63
3,647,395
def check_illegal(s):
    """
    :param s: (String) user input
    :return: (Bool) check user input is illegal or not
    """
    # NOTE(review): when ``s`` is a plain string every element is a single
    # character, so this check can never fail -- it only rejects inputs when
    # ``s`` is an iterable of multi-character tokens. Confirm intended input.
    for token in s:
        if len(token) > 1:
            print("Illegal input")
            return False
    return True
6c028f03ae6f317e7fea020e2da1f35b93d3bcd7
3,647,396
def phasefold(time, rv, err, period):
    """Phasefold an rv timeseries with a given period.

    Parameters
    ----------
    time : array_like
        An array containing the times of measurements.
    rv : array_like
        An array containing the radial-velocities.
    err : array_like
        An array containing the radial-velocity uncertainties.
    period : float
        The period with which to phase fold.

    Returns
    -------
    time_phased : array_like
        The phased timestamps (phases in [0, 1), sorted ascending).
    rv_phased : array_like
        The phased RVs.
    err_phased : array_like
        The phased RV uncertainties.
    """
    phases = (time / period) % 1
    # Fixed: was ``sp.argsort`` -- the top-level scipy aliases of numpy
    # functions are deprecated and removed in modern SciPy. np.argsort is
    # the identical operation.
    order = np.argsort(phases)  # indices that sort the points by phase
    time_phased = phases[order]
    rv_phased = rv[order]
    err_phased = err[order]
    return time_phased, rv_phased, err_phased
c0076fd49fa9c97fa90419f6f2d42d78c09f0f2e
3,647,397
def get_att_mats(translate_model):
    """
    Get's the tensors representing the attentions from a build model.

    The attentions are stored in a dict on the Transformer object while building
    the graph.

    :param translate_model: Transformer object to fetch the attention weights from.
    :return: list of encoder-decoder attention matrices, one per collected
        layer, each reduced over the head axis and squeezed.
    """
    encdec_atts = []

    prefix = 'transformer/body/'
    postfix = '/multihead_attention/dot_product_attention'

    # NOTE(review): range starts at 1, so layer_0's attention is never
    # collected -- confirm whether that is intentional.
    for i in range(1, translate_model.hparams.num_hidden_layers):
        encdec_att = translate_model.attention_weights[
            '%sdecoder/layer_%i/encdec_attention%s' % (prefix, i, postfix)]
        encdec_atts.append(encdec_att)

    # Sum over the attention heads (axis=1), then drop singleton dims.
    encdec_att_mats = [tf.squeeze(tf.reduce_sum(mat, axis=1)) for mat in encdec_atts]

    return encdec_att_mats
d6c7f0d214c444250210c926b2407405e8e34807
3,647,398
def get_resource_string(package, resource):
    """Return a string containing the contents of the specified resource.

    If the pathname is absolute it is retrieved starting at the path of
    the importer for 'fullname'.  Otherwise, it is retrieved relative
    to the module within the loader.
    """
    # Resolve (package, resource) into a pkg_resources-style provider plus a
    # normalized resource path, then delegate the read to the provider.
    provider, resource = NormalizeResource(package, resource)
    return provider.get_resource_string(_resource_manager, resource)
e0e76f912e02d30645b646e076c10cbd5016a600
3,647,399
def get_initialize_cams(number_of_camera=7, use_camera=True):
    """Create VideoCapture objects for all cameras.

    Args:
        number_of_camera (int, optional): how many capture objects to create
            when ``use_camera`` is True. Defaults to 7.
        use_camera (bool): when True, return unopened live-camera captures;
            when False, return captures over a fixed set of recorded videos
            (``number_of_camera`` is ignored in that case).

    Returns:
        list: cv2.VideoCapture objects.
    """
    if use_camera:
        return [cv2.VideoCapture() for _ in range(number_of_camera)]

    # Offline mode: the seven pre-recorded clips, in fixed order.
    video_paths = [
        r"D:\Lakshit\Duke\dukeplasto\full_video\00000004226000000.mp4",
        r"D:\Lakshit\Duke\dukeplasto\full_video\00000004888000100.mp4",
        r"D:\Lakshit\Duke\dukeplasto\full_video\00000004987000000.mp4",
        r"D:\Lakshit\Duke\dukeplasto\full_video\00000005012000000.mp4",
        r"D:\Lakshit\Duke\dukeplasto\full_video\00000005066000100.mp4",
        r"D:\Lakshit\Duke\dukeplasto\full_video\00000005142000000.mp4",
        r"D:\Lakshit\Duke\dukeplasto\full_video\00000005143000100.mp4",
    ]
    return [cv2.VideoCapture(path) for path in video_paths]
374e987647d462fbc4bc755176f1c879b15be0e8
3,647,400
def vpc_security_group_list(rds_instance):
    """Collect ids of active VPC security groups whose rules expose the
    RDS instance's port publicly.

    Args:
        rds_instance (dict): one running RDS instance description.

    Returns:
        list: VPC Security Group ids with a publicly open rule.
    """
    open_groups = []
    # `get` tolerates instances without a VpcSecurityGroups key.
    for sec_group in rds_instance.get('VpcSecurityGroups') or []:
        if sec_group['Status'] != 'active':
            continue
        group_id = sec_group['VpcSecurityGroupId']
        sec_group_rule = ec2.describe_security_group_rules(
            Filters=[
                {
                    'Name': 'group-id',
                    'Values': [group_id],
                },
            ],
            MaxResults=512,
        )
        if rds_sec_group_allowed(sec_group_rule, rds_instance['DbInstancePort']):
            open_groups.append(group_id)
    return open_groups
02fac45e235c8820d5890ab121feaf1a78a6416f
3,647,401
def expectation_l(u_values, params_list):
    """Compute, for each observation, the posterior probability that each
    copula of the mixture describes it.

    :param u_values: pseudo-observations, one row per data point
    :param params_list: mixture parameters (alpha weights, per-copula
        pi/theta, and the 'order' column mapping)
    :return: array of shape (n_obs, n_copulas) of membership probabilities
    """
    dcopula = np.zeros((u_values.shape[0], len(COPULA_DENSITY)))
    for copula, density in COPULA_DENSITY.items():
        col = params_list['order'][copula]
        weight = params_list['alpha'][col]
        pi = params_list[copula]['pi']
        # Weighted mix of an independence mass (pi) and the copula density.
        dcopula[:, col] = weight * (
            pi + (1 - pi) * density(u_values, params_list[copula]['theta'])
        )
    # Normalise each row so the per-copula memberships sum to one.
    return dcopula / np.sum(dcopula, axis=1, keepdims=True)
e446fe9b20e3909ca0fb864c86b0f652faf908a2
3,647,402
import scipy.ndimage

def lanczosSubPixShift( imageIn, subPixShift, kernelShape=3, lobes=None ):
    """Shift a 2-D image by a sub-pixel amount using a Lanczos kernel.

    Parameters
    ----------
    imageIn : 2-D numpy array
        The image to shift.
    subPixShift : sequence of float
        [y, x] shift; recommended not to exceed 1.0, should be float.
    kernelShape : int
        Side length of the interpolation kernel.
    lobes : int or None
        Number of Lanczos lobes. Random combinations of kernelShape and
        lobes give poor performance; generally lobes = (kernelShape+1)/2.
        kernelShape=3, lobes=2 is a lanczos2 kernel with almost no low-pass
        character; kernelShape=5, lobes=3 is the typical lanczos3 choice.
        Anything with lobes=1 is a low-pass filter with next to no ringing
        artifacts.

    Returns
    -------
    2-D numpy array
        The shifted image.
    """
    lanczos_filt = lanczosSubPixKernel( subPixShift, kernelShape=kernelShape, lobes=lobes )
    # TODO: accelerate this with a thread pool.
    # BUGFIX: `import scipy` alone does not guarantee that the
    # `scipy.ndimage` submodule is loaded as an attribute; import the
    # submodule explicitly so the call below cannot raise AttributeError.
    imageOut = scipy.ndimage.convolve( imageIn, lanczos_filt, mode='reflect' )
    return imageOut
c7bbf51f94ab323ae9bf15f0af7c0d3dfa11aaeb
3,647,403
def open_pdb(file_location):
    """Read a PDB file and return atomic symbols and coordinates.

    Parameters
    ----------
    file_location : str
        The location of the PDB file.

    Returns
    -------
    symbols : list
        Atomic symbols (element field, columns 77-79) for every
        ATOM/HETATM record.
    coordinates : np.ndarray
        N x 3 array of x, y, z coordinates.
    """
    symbols = []
    coordinates = []
    # `with` closes the file automatically; the explicit close() in the
    # original was redundant. Streaming line-by-line avoids holding the
    # whole file in memory.
    with open(file_location) as pdb_file:
        for line in pdb_file:
            if 'ATOM' in line[0:6] or 'HETATM' in line[0:6]:
                symbols.append(line[76:79].strip())
                # BUGFIX: x/y/z occupy columns 31-54 (slice [30:54]).
                # Slicing to 55 pulled in the first occupancy character,
                # which corrupted the z value whenever occupancy >= 100.
                atom_coordinates = [float(x) for x in line[30:54].split()]
                coordinates.append(atom_coordinates)
    coordinates = np.array(coordinates)
    return symbols, coordinates
25a0946aeae277f4d60cdba79c7aa4f973853027
3,647,404
def _remove_statements(evaluator, stmt, name):
    """
    This is the part where statements are being stripped.

    Due to lazy evaluation, statements like a = func; b = a; b() have to
    be evaluated.

    :param evaluator: the evaluator used to resolve the statement via
        ``eval_statement``.
    :param stmt: the statement to strip down; may be wrapped in an
        ``er.InstanceElement`` when accessed through an instance.
    :param name: the name being sought; forwarded as ``seek_name``.
    :return: list of the types the statement evaluates to.
    """
    types = []
    # Remove the statement docstr stuff for now, that has to be
    # implemented with the evaluator class.
    #if stmt.docstr:
        #res_new.append(stmt)
    check_instance = None
    if isinstance(stmt, er.InstanceElement) and stmt.is_class_var:
        # Unwrap a class variable accessed through an instance; remember the
        # instance so functions can be re-bound to it below.
        check_instance = stmt.instance
        stmt = stmt.var
    types += evaluator.eval_statement(stmt, seek_name=name)
    if check_instance is not None:
        # class renames: re-wrap functions so they are bound to the
        # originating instance.
        types = [er.get_instance_el(evaluator, check_instance, a, True)
                 if isinstance(a, (er.Function, pr.Function)) else a for a in types]
    return types
022204865e1a44aa4741e8e42acd3a3ea66a1a38
3,647,405
from typing import Tuple

def transform_vector_global_to_local_frame(
    vector: Tuple[float, float, float], theta: float
) -> Tuple[float, float, float]:
    """
    Transform a vector from the global frame into the local frame.

    :param vector: the vector to be rotated
    :param theta: the amount to rotate by
    :return the transformed vector.
    """
    # Delegates the actual rotation to the shared helper.
    local_vector = rotate_vector(vector, theta)
    return local_vector
57e67029dfb9b6d8242930d6227d655508ae68c3
3,647,406
from typing import List

def kadane_algorithm(sequence: List[int]):
    """Maximum-subarray sum via Kadane's greedy scan - O(n) time, O(1) space.

    Returns 0 for an empty sequence.
    """
    if not sequence:
        return 0
    best = running = sequence[0]
    for value in sequence[1:]:
        # Either extend the running subarray or restart it at `value`.
        running = value if value > running + value else running + value
        if running > best:
            best = running
    return best
f6096309055e52538f9a5f9b5b769b269688b068
3,647,408
from typing import Iterable

def allow_domains(request: HttpRequest, domains: Iterable[str]) -> HttpResponse:
    """
    Serves a cross-domain access policy allowing a list of domains.

    Note that if this is returned from the URL ``/crossdomain.xml`` on a
    domain, it will act as a master policy and will not permit other
    policies to exist on that domain. If you need to set meta-policy
    information and allow other policies, use the view
    :view:`flashpolicies.views.metapolicy` for the master policy instead.

    **Required arguments:**

    ``domains``
        A list of domains from which to allow access. Each value may
        be either a domain name (e.g., ``example.com``) or a wildcard
        (e.g., ``*.example.com``). Due to serious potential security
        issues, it is strongly recommended that you not use wildcard
        domain values.

    **Optional arguments:**
        None.
    """
    policy = policies.Policy(*domains)
    return serve(request, policy)
38de0a97734893f618903bd5133dc794521effcf
3,647,409
def get_Cs_OR():
    """Correction factor (Cs_OR) for the occupancy sensor of the lighting
    equipment installed in lighting zone i of "other rooms".

    Returns:
        float: Cs_OR, the occupancy-sensor correction factor for lighting
        equipment in lighting zone i of other rooms (fixed at 1.0, i.e.
        no sensor correction is applied).
    """
    NO_SENSOR_CORRECTION = 1.0
    return NO_SENSOR_CORRECTION
afcbbb70ad7589aa7dace741931c39434b444ba1
3,647,410
def build_url(station, d1, d2):
    """
    Return the URL to fetch the response record for USArray MT station
    identifier *station* for the time range *d1* to *d2*.
    """
    base = 'http://service.iris.edu/irisws/resp/1/query'
    # datetimes are rendered as ISO-like timestamps via the format spec.
    query = (f'net=EM&sta={station}&loc=--&cha=*'
             f'&starttime={d1:%Y-%m-%dT%H:%M:%S}'
             f'&endtime={d2:%Y-%m-%dT%H:%M:%S}')
    return f'{base}?{query}'
221d5f7a321d0e9337dbbe75e419298bcd3ab5c0
3,647,411
def get_tlinks(timeml_doc):
    """Return the TLINK elements from an annotated TimeML document."""
    root = xml_utilities.get_root(timeml_doc)
    # Direct children only, matching the original traversal of `root`.
    return [element for element in root if element.tag == "TLINK"]
376136a647f6525136643e67c85925268813296a
3,647,413
from typing import Sequence
from typing import Any

def stack(xs: Sequence[Any], axis: int = 0) -> Any:
    """
    Stack the (leaf) arrays from xs

    :param xs: list of trees with the same shape, where the leaf values are
        numpy arrays
    :param axis: axis to stack along
    """
    def _stack_leaves(*leaves):
        # One leaf per input tree; stack them along the requested axis.
        return np.stack(leaves, axis=axis)

    return multimap(_stack_leaves, *xs)
af2a7d6baf23597caf83bb4dcbb226d255c7dcbb
3,647,414
import csv

def load_LAC_geocodes_info(path_to_csv):
    """Import local area unit district codes.

    Reads the csv file and creates a dictionary keyed by 'geo_code'
    (the first column). Every remaining column becomes an entry in the
    per-row dict, converted to float where possible and kept as a string
    otherwise.

    Note
    -----
    - no LAD without population must be included

    Parameters
    ----------
    path_to_csv : str
        Path to the csv file to read.

    Returns
    -------
    dict
        Mapping geo_code -> {column_heading: float_or_str_value}.
    """
    with open(path_to_csv, 'r') as csvfile:
        read_lines = csv.reader(csvfile, delimiter=',')

        _headings = next(read_lines)  # Skip first row (column headings)

        data = {}
        for row in read_lines:
            values_line = {}
            for nr, value in enumerate(row[1:], 1):
                # BUGFIX: catch only the float-conversion failure. The
                # original bare `except:` also swallowed SystemExit,
                # KeyboardInterrupt and genuine bugs such as IndexError.
                try:
                    values_line[_headings[nr]] = float(value)
                except ValueError:
                    values_line[_headings[nr]] = str(value)

            # Add entry with geo_code
            data[row[0]] = values_line
    return data
bd97d888ddb58469b111b41a7ea0a5a9e0be88fd
3,647,415
def singleton(class_):
    """Decorator for singleton class."""
    _cache = {}

    def get_instance(*args, **kwargs):
        # Construct on the first call only; afterwards the constructor
        # arguments are ignored and the cached object is returned.
        if class_ in _cache:
            return _cache[class_]
        instance = class_(*args, **kwargs)
        _cache[class_] = instance
        return instance

    return get_instance
3dbc0e9525812b2698bc3be21aed18028eb39408
3,647,416
def convert_model_to_half(model):
    """
    Converts model to half precision but keeps the batch norm layers in
    32 bit for precision purposes.

    :param model: the model to convert; ``.half()`` converts it in place
        before ``BN_convert_float`` restores the batch-norm layers.
    :return: the half-precision model with float32 batch-norm layers.
    """
    # BUGFIX: the previous `old_model = model; ...; del old_model` only
    # removed a local alias -- the `model` argument still referenced the
    # object, so no memory was actually released. The aliasing dance is
    # dropped entirely; behavior is otherwise unchanged.
    return BN_convert_float(model.half())
3902ce122fa2fd89d7bf5d35e91fbf743b698bc7
3,647,417
def tokenize_sentences(sentences):
    """
    Tokenize sentences into tokens (words)

    Args:
        sentences: List of strings

    Returns:
        List of lists of tokens
    """
    # Lowercase each sentence, then split it into word tokens with nltk.
    return [nltk.word_tokenize(sentence.lower()) for sentence in sentences]
8c6b1cb4dd390051755cf17df533f3a1ff71b1a3
3,647,418
def makeId(timestamp = 0, machine = 0, flow = 0):
    """
    Build a composite id from a unix-style timestamp (not a python
    timestamp), a machine number and a flow number.

    Bit layout: (timestamp - _base) occupies the high bits (shifted left
    by 13), machine sits above the low byte (shifted left by 8), and
    flow fills the low 8 bits.
    """
    relative_ts = timestamp - _base
    machine_bits = machine << 8
    return (relative_ts << 13) | machine_bits | flow
29b175f07cb6e5c7ddc1f77f1fb7871514abc7df
3,647,420