content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def reduce_aet_if_dry(aet, wat_lev, fc):
    """ Reduce actual evapotranspiration if the soil is dry.

        If the water level in a cell is less than 0.7*fc, the rate of
        evapo-transpiration is reduced by a factor. This factor is 1 when
        wat_lev = 0.7*fc and decreases linearly to reach 0 when wat_lev = 0

        i.e. where wat_lev < 0.7*fc, apply a correction factor of
        wat_lev/(0.7*fc) to the aet grid.

    Args:
        aet:     "Raw" actual evapotranspiration grid.
        wat_lev: Water level grid
        fc:      Soil field capacity grid.

    Returns:
        Array (modified AET grid with AET reduced where necessary).
    """
    # Cells below 70% of field capacity need their AET scaled down.
    dry_mask = wat_lev < (0.7 * fc)

    # (correction factor - 1) for every cell; multiplying by the boolean
    # mask zeroes it out for wet cells, and adding 1 back yields a grid of
    # correction factors (dry cells) and ones (wet cells).
    ratio_minus_one = (wat_lev / (0.7 * fc)) - 1
    correction = (dry_mask * ratio_minus_one) + 1

    return aet * correction
170462a23c3903a390b89963aa6ce21839e5d44b
3,638,112
def merge_sort(array):
    """
    Sort array via merge sort algorithm

    Args:
        array: list of elements to be sorted

    Returns:
        Sorted list of elements (the input list is not modified)

    Examples:
        >>> merge_sort([1, -10, 21, 3, 5])
        [-10, 1, 3, 5, 21]
    """
    # Base case: lists of length 0 or 1 are already sorted. Checking <= 1
    # (not == 1) prevents infinite recursion on an empty input list, where
    # mid would be 0 and array[:mid] would recurse on [] forever.
    if len(array) <= 1:
        return array[:]
    mid = len(array) // 2
    left = merge_sort(array[:mid])
    right = merge_sort(array[mid:])
    # _merge (defined elsewhere in this module) combines two sorted lists.
    return _merge(left, right)
9730f88be4a54334bac801dc4713bf20b683f824
3,638,113
def create_monitored_session(target: tf.train.Server, task_index: int,
                             checkpoint_dir: str, save_checkpoint_secs: int,
                             config: tf.ConfigProto=None) -> tf.Session:
    """
    Create a monitored session for the worker

    :param target: the target string for the tf.Session
    :param task_index: the task index of the worker
    :param checkpoint_dir: a directory path where the checkpoints will be stored
    :param save_checkpoint_secs: number of seconds between checkpoints storing
    :param config: the tensorflow configuration (optional)
    :return: the session to use for the run
    """
    # By convention, task 0 acts as the chief worker.
    return tf.train.MonitoredTrainingSession(
        master=target,
        is_chief=(task_index == 0),
        hooks=[],
        checkpoint_dir=checkpoint_dir,
        save_checkpoint_secs=save_checkpoint_secs,
        config=config,
    )
f963e61cf57aa602a9a9397104a935b5a17a6dc1
3,638,114
def sun_rise_set_times(datetime_index, coords):
    """
    Return sunrise and set times for the given datetime_index and coords,
    as a Series indexed by date (days, resampled from the datetime_index).
    """
    observer = ephem.Observer()
    observer.lat = str(coords[0])
    observer.lon = str(coords[1])

    # Collapse the (possibly sub-daily) index to one entry per calendar day.
    unique_dates = datetime_index.to_series().map(pd.Timestamp.date).unique()
    daily_index = pd.DatetimeIndex(unique_dates)

    return pd.Series(_sun_rise_set(daily_index, observer), index=daily_index)
9c2dfb3c7c86c98144b23788b0b399899724c8ff
3,638,115
def get_n1_event_format():
    """
    Define the format for the events in a neurone recording.

    Arguments: None.

    Returns:
    - A Struct (from the construct library) describing the event format.
    """
    # (field name, binary type) pairs, in on-disk order.
    # noinspection PyUnresolvedReferences
    layout = [
        ("Revision", Int32sl),
        ("RFU1", Int32sl),
        ("Type", Int32sl),
        ("SourcePort", Int32sl),
        ("ChannelNumber", Int32sl),
        ("Code", Int32sl),
        ("StartSampleIndex", Int64ul),
        ("StopSampleIndex", Int64ul),
        ("DescriptionLength", Int64ul),
        ("DescriptionOffset", Int64ul),
        ("DataLength", Int64ul),
        ("DataOffset", Int64ul),
        ("RFU2", Int32sl),
        ("RFU3", Int32sl),
        ("RFU4", Int32sl),
        ("RFU5", Int32sl),
    ]
    return Struct(*(field_name / field_type for field_name, field_type in layout))
8663c4b8ba8d83e10ed6dc03a35589c74cd23420
3,638,116
def idzp_rid(eps, m, n, matveca):
    """
    Compute ID of a complex matrix to a specified relative precision using
    random matrix-vector multiplication.

    :param eps: Relative precision.
    :type eps: float
    :param m: Matrix row dimension.
    :type m: int
    :param n: Matrix column dimension.
    :type n: int
    :param matveca: Function to apply the matrix adjoint to a vector, with
        call signature `y = matveca(x)`, where `x` and `y` are the input and
        output vectors, respectively.
    :type matveca: function

    :return: Rank of ID.
    :rtype: int
    :return: Column index array.
    :rtype: :class:`numpy.ndarray`
    :return: Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    # Workspace sized as required by the Fortran routine.
    work = np.empty(m + 1 + 2*n*(min(m, n) + 1),
                    dtype=np.complex128, order='F')
    k, idx, work, ier = _id.idzp_rid(eps, m, n, matveca, work)
    if ier:
        raise _RETCODE_ERROR
    # Only the first k*(n-k) workspace entries hold interpolation
    # coefficients; reshape them Fortran-style to (k, n-k).
    proj = work[:k*(n-k)].reshape((k, n-k), order='F')
    return k, idx, proj
7655afb9b5c29f395ca6c5c83baaa8592f68d124
3,638,117
def update_position(request, space_url):
    """
    This view saves the new note position in the debate board. Instead of
    reloading all the note form with all the data, we use the partial form
    "UpdateNotePosition" which only handles the column and row of the note.
    """
    place = get_object_or_404(Space, url=space_url)

    # Only AJAX POST requests are accepted.
    if request.method != "POST" or not request.is_ajax():
        return HttpResponseBadRequest(_("The petition was not POST."))

    note = get_object_or_404(Note, pk=request.POST['noteid'])
    debate = get_object_or_404(Debate, pk=note.debate.id)
    position_form = UpdateNotePosition(request.POST or None, instance=note)

    # Space/debate staff or the note's author may move the note.
    allowed = (request.user.has_perm('admin_space', place)
               or request.user.has_perm('mod_space', place)
               or request.user.has_perm('admin_debate', debate)
               or request.user.has_perm('mod_debate', debate)
               or request.user == note.author)
    if not allowed:
        raise PermissionDenied

    if not position_form.is_valid():
        return HttpResponseBadRequest(
            _("There has been an error validating the form."))

    note_update = position_form.save(commit=False)
    note_update.column = get_object_or_404(Column, pk=request.POST['column'])
    note_update.row = get_object_or_404(Row, pk=request.POST['row'])
    note_update.save()
    return HttpResponse(_("Note updated"))
29cc55ad30c8cdf6a326381a9b267aaf515059b6
3,638,118
def tan(x):
    """Return the tangent of *x* radians."""
    # The previous body was a stub that always returned 0.0, contradicting
    # the documented contract; delegate to the C-backed math library.
    import math
    return math.tan(x)
51a8f497b2cc81cfd0066a7f8f5b1afef362941e
3,638,119
def entropy(p):
    """
    Calculates the Shannon entropy for a marginal distribution.

    Args:
        p (np.ndarray): the marginal distribution.

    Returns:
        (float): the entropy of p
    """
    # Zero-probability outcomes contribute nothing to Shannon entropy by
    # definition; dropping them avoids log(0) errors/warnings.
    nonzero = p[p != 0]
    shannon = -np.sum(nonzero * np.log(nonzero))
    # Clamp values within machine epsilon of zero.
    return _eps_filter(shannon)
d9deb56211069e70ee688ec7cf9cea4cb6507d2a
3,638,120
def format_to_str(*a, **kwargs):
    """
    Formats gotten objects to str.

    Positional arguments are converted to strings and joined with single
    spaces. Objects exposing an ``itemType`` attribute are rendered as
    ``<itemType:itemModelPointer>``.

    Keyword Args:
        keepNewlines (bool): when False, every newline in the result is
            replaced with the literal ``*nl*`` marker. Defaults to True.

    Returns:
        str: the joined representation of all arguments.
    """
    # BUG FIX: use .get() with a default -- previously any call supplying an
    # unrelated keyword argument raised KeyError on 'keepNewlines'.
    keep_newlines = kwargs.get('keepNewlines', True)

    pieces = []
    for item in a:
        if isinstance(item, str):
            pieces.append(item)
        elif isinstance(item, (list, dict, tuple)):
            pieces.append(str(item))
        elif hasattr(item, "itemType"):
            pieces.append("<" + item.itemType + ":" + item.itemModelPointer + ">")
        else:
            pieces.append(str(item))
    result = " ".join(pieces)

    if not keep_newlines:
        result = result.replace("\n", "*nl*")
    return result
066262f6059a7f146026b1bc638b9119e2c34718
3,638,121
def zero_corrected_countless(data):
    """
    Vectorized implementation of downsampling a 2D image by 2 on each side
    using the COUNTLESS algorithm.

    data is a 2D numpy array with even dimensions.
    """
    # Shift every value up by one so 0 can serve as the "no match" marker
    # below. A copy (not +=) leaves the caller's array intact.
    # NOTE: needs headroom in the dtype; 255 in uint8 would wrap without it.
    shifted = data + 1

    # Split the image into the four 2x2-neighbourhood positions A, B, C, D:
    # strides of 2 in each axis with offsets (0,0), (0,1), (1,0), (1,1).
    stride = (2, 2)
    quads = [
        shifted[tuple(np.s_[start::step] for start, step in zip(offset, stride))]
        for offset in np.ndindex(stride)
    ]
    a, b, c, d = quads

    match_ab = a * (a == b)  # PICK(A,B)
    match_ac = a * (a == c)  # PICK(A,C)
    match_bc = b * (b == c)  # PICK(B,C)

    # Bitwise OR is safe because non-matching picks are zeroed.
    winner = match_ab | match_ac | match_bc

    # Fall back to D where no pair matched, then undo the +1 shift.
    return winner + (winner == 0) * d - 1
7b0dc08f0233f929b572d555ad611c8bf795bbfd
3,638,123
from typing import Union


def mnn_synthetic_data(
    n_samples: int = 1000,
    n_features: int = 100,
    n_batches: int = 2,
    n_latent: int = 2,
    n_classes: int = 3,
    proportions: np.ndarray = None,
    sparsity: float = 1.0,
    scale: Union[int, float] = 5,
    batch_scale: float = 0.1,
    bio_batch_angle: Union[float, None] = None,
    seed: int = 2018,
):
    """
    Generate synthetic expression data with a controllable batch effect.

    :param n_samples: number of samples (cells) per batch
    :param n_features: number of features (genes)
    :param n_batches: number of batches
    :param n_latent: size of the latent space used to generate data
    :param n_classes: number of classes shared across batches
    :param proportions: proportion of cells from each class in each batch.
            If shape is (n_classes,) same proportions used each time.
            If shape is (n_batches, n_classes) then each row is a
            different batch. Default is equal representation
    :param sparsity: sparsity of class weightings
    :param scale: scaling factor for generating data
    :param batch_scale: batch effect relative to data
    :param bio_batch_angle: angle of batch effect w/ bio subspace
    :param seed: seed for random number generator
    :return: real-valued expression data with batch effect and metadata
    """
    if proportions is None:
        proportions = np.ones((n_batches, n_classes)) / n_classes
    else:
        proportions = np.broadcast_to(proportions, (n_batches, n_classes))

    if seed:
        np.random.seed(seed)

    class_centers = latent.gen_classes(n_latent, n_classes, sparsity, scale)
    batches = np.repeat(np.arange(n_batches), n_samples)

    # Sample each batch's latent coordinates and class assignments.
    per_batch = [
        latent.sample_classes(n_samples, class_centers, proportions[b, :])
        for b in range(n_batches)
    ]
    latent_exp = np.vstack([lat for lat, _ in per_batch])
    classes = np.hstack([cls for _, cls in per_batch])

    # Project latent coordinates into feature (gene) space.
    programs = latent.gen_programs(n_latent, n_features, 1.0, 1.0)
    expression = np.dot(latent_exp, programs)
    projection_to_bio = np.dot(np.linalg.pinv(programs), programs)

    expression_w_batch = batch.add_batch_vectors(
        expression,
        batches,
        batch_scale,
        bio_batch_angle,
        projection_to_bio,
        copy=True,
    )

    adata = util.arrays_to_anndata(
        expression_w_batch,
        batches,
        classes,
        X_latent=latent_exp,
        X_gt=expression,
    )
    return adata
e3e696cfb1e207db2159cdc512bc5b4d8f24e47b
3,638,124
def create_labeled_pair(img, gt_center, prop_center, gt_radius, scale):
    """
    Given a crater proposal and ground truth label, this function creates
    a labeled pair. Returns X, Y, where X is an image, and Y is a set of
    ground truths.

    img: an array
    gt_center: the known ground-truth center point (x, y) (floats)
    prop_center: the crater proposal center (x, y) (ints)
    gt_radius: the known ground-truth crater radius in pixels (float)
    scale: one of [32, 64, 128, 256] (very rough size of crater)
        follows the scheme:
            0<r<8    --> scale=32
            8<r<16   --> scale=64
            16<r<32  --> scale=128
            32<r     --> scale=256
        (craters bigger than r=64 not supported)

    Raises:
        ValueError: if scale is not one of the permitted values.
    """
    permitted_scales = [32, 64, 128, 256]
    if scale not in permitted_scales:
        # ValueError is the idiomatic type for a bad argument value (and is
        # still caught by callers handling the old generic Exception).
        msg = f"scale {scale} not permitted. Please use one of: "
        msg += str(permitted_scales)
        raise ValueError(msg)

    # Offsets are expressed in units of the (scale/32) downsampling factor.
    scale_factor = scale//32
    x_offset = (gt_center[0] - prop_center[0])/scale_factor
    y_offset = (gt_center[1] - prop_center[1])/scale_factor
    # Radius normalised to the proposal window size.
    r_scaled = gt_radius/scale
    Y = (x_offset, y_offset, r_scaled)
    X = extract_proposal(img, prop_center, scale)
    return X, Y
b65bc6234183794c432377f8475833dc1f8b72c7
3,638,125
def loader_to_dask(loader_array):
    """
    Map a call to `dask.array.from_array` onto all the elements in
    ``loader_array``.

    This is done so that an explicit ``meta=`` argument can be provided to
    prevent loading data from disk.
    """
    if len(loader_array.shape) != 1:
        raise ValueError("Can only be used on one dimensional arrays")

    # Passing meta= tells dask the array properties (e.g. dtype) up front,
    # so it never has to read the actual on-disk data to infer them.
    empty_meta = np.zeros((0,), dtype=loader_array[0].dtype)
    return map(partial(da.from_array, meta=empty_meta), loader_array)
75039dac3f5ed21e6c1a0b8b5445af30757267cf
3,638,126
import re


def list_to_sentences(string):
    """
    Splits text at newlines and puts it back together after stripping new-
    lines and enumeration symbols, joined by a period.
    """
    if string is None:
        return None

    fragment = ''
    fragments = []

    def _flush():
        # Store the current fragment, minus any trailing period.
        fragments.append(re.sub(r'\.\s*$', '', fragment))

    for raw_line in string.splitlines():
        line = raw_line.strip()
        if not line:
            # Blank line terminates the current fragment.
            if fragment:
                _flush()
            fragment = ''
        elif not fragment:
            # Start a fresh fragment, dropping leading enumeration symbols.
            fragment = re.sub(r'^[-\d\.\(\)]+\s*', '', line)
        elif (re.match(r'^-\s+', line)
                or re.match(r'^\d+\.\s+', line)
                or re.match(r'^(-\s*)?\d+\)\s+', line)):
            # New list item: starts with "-", "1." or "1)" (optional dash).
            if fragment:
                _flush()
            fragment = re.sub(r'^(-|(\d+\.)|((-\s*)?\d+\)))\s*', '', line)
        else:
            # Continuation of the previous fragment.
            fragment = '%s %s' % (fragment, line)

    if fragment:
        _flush()

    sentences = '. '.join(fragments) if fragments else ''
    if sentences:
        sentences += '.'
    return sentences
3f155bf501d78cb9263a9cbb0b6d7e4102daeb53
3,638,127
def decode_locations_one_layer(anchors_one_layer, offset_bboxes):
    """decode the offset bboxes into center bboxes

    Args:
        anchors_one_layer: ndarray represents all anchors coordinate in one
            layer, encode by [y,x,h,w]
        offset_bboxes: A tensor with any shape, the shape of lowest axis must
            be 4, means the offset val in [y,x,h,w]
    Return:
        the locations of bboxes encode by [y,x,h,w]
    """
    # Capture the static shape; an unknown (None) dimension is replaced by
    # -1 so the final reshape back to the input shape is valid.
    shape = offset_bboxes.get_shape().as_list()
    try:
        i = shape.index(None)
        shape[i] = -1
    except ValueError:
        pass
    # Flatten to (batch, num_anchors, 4) for vectorised decoding.
    offset_bboxes = tf.reshape(offset_bboxes,shape=tf.stack([shape[0], -1, shape[-1]]))
    yref, xref, href, wref = anchors_one_layer
    # Anchor corner coordinates from the centre/size representation.
    ymin = yref - href / 2.
    xmin = xref - wref / 2.
    ymax = yref + href / 2.
    xmax = xref + wref / 2.
    anchor_ymin = np.float32(ymin)
    anchor_xmin = np.float32(xmin)
    anchor_ymax = np.float32(ymax)
    anchor_xmax = np.float32(xmax)
    # Transform to center / size.
    anchor_cy = (anchor_ymax + anchor_ymin) / 2.
    anchor_cx = (anchor_xmax + anchor_xmin) / 2.
    anchor_h = anchor_ymax - anchor_ymin
    anchor_w = anchor_xmax - anchor_xmin
    ## reshape to -1 ##
    anchor_cy = np.reshape(anchor_cy,[-1])
    anchor_cx = np.reshape(anchor_cx, [-1])
    anchor_h = np.reshape(anchor_h, [-1])
    anchor_w = np.reshape(anchor_w, [-1])
    # Decode: centre offsets are scaled by anchor size; height/width offsets
    # are log-scaled relative to the anchor size (hence the exp).
    bboxes_cy = offset_bboxes[:, :, 0] * anchor_h + anchor_cy
    bboxes_cx = offset_bboxes[:, :, 1] * anchor_w + anchor_cx
    bboxes_h = tf.exp(offset_bboxes[:, :, 2]) * anchor_h
    bboxes_w = tf.exp(offset_bboxes[:, :, 3]) * anchor_w
    cbboxes_out = tf.stack([bboxes_cy, bboxes_cx, bboxes_h, bboxes_w], axis=-1)
    # Restore the caller's original tensor shape.
    cbboxes_out = tf.reshape(cbboxes_out, shape=shape)
    return cbboxes_out
96df6f6b9756cda991e47aea226ea64d5de8648c
3,638,128
def FormatReserved(enum_or_msg_proto):
    """Format reserved values/names in a [Enum]DescriptorProto.

    Args:
        enum_or_msg_proto: [Enum]DescriptorProto message.

    Returns:
        Formatted enum_or_msg_proto as a string.
    """
    # Expand every reserved_range into its individual field numbers.
    numbers = []
    for rr in enum_or_msg_proto.reserved_range:
        numbers.extend(range(rr.start, rr.end))

    out = ''
    if enum_or_msg_proto.reserved_range:
        out = FormatBlock('reserved %s;\n' % ','.join(map(str, numbers)))
    if enum_or_msg_proto.reserved_name:
        out += FormatBlock('reserved %s;\n' % ', '.join(
            '"%s"' % n for n in enum_or_msg_proto.reserved_name))
    return out
56b3ad5c2d31a901847c50ad05bd324ca366f101
3,638,129
def download_pepper(load=True):  # pragma: no cover
    """Download scan of a pepper (capsicum).

    Originally obtained from Laser Design.

    Parameters
    ----------
    load : bool, optional
        Load the dataset after downloading it when ``True``.  Set this
        to ``False`` and only the filename will be returned.

    Returns
    -------
    pyvista.PolyData or str
        DataSet or filename depending on ``load``.

    Examples
    --------
    >>> from pyvista import examples
    >>> dataset = examples.download_pepper()
    >>> dataset.plot()

    """
    # Delegate to the shared download helper with this dataset's filename.
    dataset_file = 'pepper.ply'
    return _download_and_read(dataset_file, load=load)
3d35c19a5eb36d8a393076b212c5a9789ff61625
3,638,130
def getAllSerial():
    """get all device serials found by command adb devices"""
    _, output_lines = shell_command("adb devices")
    # Lines for connected devices look like "<serial>\tdevice\n".
    attached = (line.split()[0] for line in output_lines if "\tdevice\n" in line)
    # Shortest serials first.
    return sorted(attached, key=len)
a6a6fd93c4bd27babbca2a15999e19985677e0a1
3,638,131
import io


def plot_points(points):
    """Generate a plot with a varying number of randomly generated points

    Args:
        points (int): a number of points to plot

    Returns:
        An svg plot with <points> data points
    """
    # Random (x, y) pairs to plot. (A stray ``data = np.random`` assignment
    # that was immediately overwritten has been removed.)
    data = np.random.rand(points, 2)

    fig = Figure()
    FigureCanvas(fig)
    ax = fig.add_subplot(111)
    ax.scatter(data[:, 0], data[:, 1])
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title(f'There are {points} data points!')
    ax.grid(True)

    img = io.StringIO()
    fig.savefig(img, format='svg')

    # clip off the xml headers from the image
    svg_img = '<svg' + img.getvalue().split('<svg')[1]
    return svg_img
1b3bba3e48ef252e80ad7895ce596e9deb9a0c86
3,638,133
def bisect_status():
    """Reproduce the status line git-bisect prints after each step."""
    # Halving the remaining revisions approximates git's own estimate.
    revisions_left = ceil((bisect_revisions() - 1) / 2)
    steps_left = bisect_steps_remaining() - 1
    template = ("Bisecting: {} revisions left to test after this "
                "(roughly {} steps).")
    return template.format(revisions_left, steps_left)
7ddca3f9de9de3775a52aac1b03a3383ebc487df
3,638,134
def echo_view():
    """Call echo() with the Flask request."""
    current_request = flask.request
    return echo(current_request)
f51f9f6b2f1f58abcc6f1a0ed9a28056c092f289
3,638,135
def read_bytes_offset_file(f, n_bytes, v=0):
    """
    Used to skip some offset when reading a binary file.

    The bytes are consumed from the file handle (advancing its position)
    and then discarded; an empty list is always returned.

    Parameters
    ----------
    f : file handler (sys.stdin).
    n_bytes : int
        number of bytes to skip.
    v : int [0 by default]
        verbose mode if 1.

    Returns
    -------
    list
        Always the empty list.
    """
    # np.fromfile does not raise EOFError -- it simply returns fewer items
    # when the file is exhausted -- so the old try/except EOFError branch
    # was dead code. The read is performed only for its side effect of
    # advancing the file position; the data itself is discarded.
    np.fromfile(file=f, dtype=np.uint8, count=n_bytes)
    if v == 1:
        print("vdif - Read " + str(n_bytes))
    return []
3f68b4ff22dba97e89cc7b036f63655f3d6b756d
3,638,136
from datetime import datetime


def person_relationship_dates(node):
    """Find the nearest start/end dates related to a node's person.

    Returns a dict with 'start_date' and 'end_date' keys. If the direct
    relationship carries a start date, its dates are used; otherwise, for
    Project nodes, the range is derived from the person's role-level
    relationships. All other cases yield None for both dates.
    """
    person = node.people.single()
    rel = node.people.relationship(person)
    if rel.start_date is not None:
        return {'start_date': rel.start_date, 'end_date': rel.end_date}

    # No direct dates: for projects, look at all roles associated with the
    # project and determine the date range from those roles.
    if isinstance(node, Project):
        start_dates = []
        end_dates = []
        for role in node.roles:
            role_rel = role.people.relationship(person)
            if role_rel.start_date is not None:
                start_dates.append(role_rel.start_date)
            if role_rel.end_date is not None:
                end_dates.append(role_rel.end_date)
        dates = {
            'start_date': min(start_dates) if start_dates else None,
            'end_date': max(end_dates) if end_dates else None,
        }
        # An engagement with starts but no ends is ongoing -- run it until
        # today. BUG FIX: this module imports the *class* ``datetime``, so
        # the previous ``datetime.date.today()`` raised AttributeError.
        if start_dates and not end_dates:
            dates['end_date'] = datetime.now().date()
        return dates

    return {'start_date': None, 'end_date': None}
cd16b3f69979f0d516cb28313ac0dabda9416f30
3,638,137
def oops():
    """Lazy way to return an oops response (HTTP 400)."""
    body, status = 'oops', 400
    return make_response(body, status)
e52fb6621ff9fdba48d2d5b81a8cbbb5241eeac8
3,638,138
def get_lowest_energy_conformer(
    name,
    mol,
    gfn_exec=None,
    settings=None,
):
    """
    Get lowest energy conformer of molecule.

    Method:
        1) ETKDG conformer search on molecule
        2) xTB `normal` optimisation of each conformer
        3) xTB `opt_level` optimisation of lowest energy conformer
        4) save file

    Args:
        name: Prefix for the `{name}_confs/` working files and log output.
        mol: Molecule to search; passed to `build_conformers` and
            `update_from_rdkit_conf`.
        gfn_exec: Path to the xTB executable, forwarded to the optimisation
            and energy helpers (optional).
        settings: Dict of search/optimisation settings; when None a default
            dict is used (see body). Raises MissingSettingError if any
            required key is absent.

    Returns:
        The lowest-energy conformer, optimised at `final_opt_level`.

    NOTE(review): this function writes many files under `{name}_confs/` and
    appears to assume that directory already exists -- confirm with callers.
    """
    if settings is None:
        settings = {
            'conf_opt_level': 'normal',
            'final_opt_level': 'extreme',
            'charge': 0,
            'no_unpaired_e': 0,
            'max_runs': 1,
            'calc_hessian': False,
            'solvent': None,
            'N': 100
        }

    # Check for missing settings.
    req_settings = [
        'N', 'final_opt_level', 'charge', 'no_unpaired_e', 'max_runs',
        'calc_hessian', 'solvent', 'conf_opt_level'
    ]
    for i in req_settings:
        if i not in settings:
            raise MissingSettingError(
                f'Settings missing {i}. Has {settings.keys()}.'
            )

    # Run ETKDG on molecule.
    print(f'....running ETKDG on {name}')
    cids, confs = build_conformers(mol, N=settings['N'])

    # Optimize all conformers at normal level with xTB, tracking the lowest
    # energy seen so far (sentinels: id -100, energy 1e21).
    low_e_conf_id = -100
    low_e = 10E20
    for cid in cids:
        name_ = f'{name}_confs/c_{cid}'
        ey_file = f'{name}_confs/c_{cid}_eyout'
        # NOTE: `mol` is rebound each iteration to carry this conformer's
        # coordinates.
        mol = update_from_rdkit_conf(
            mol, confs, conf_id=cid
        )
        mol.write(f'{name}_confs/c_{cid}.mol')
        # Optimize.
        opt_mol = optimize_conformer(
            name=name_,
            mol=mol,
            gfn_exec=gfn_exec,
            opt_level=settings['conf_opt_level'],
            charge=settings['charge'],
            no_unpaired_e=settings['no_unpaired_e'],
            max_runs=settings['max_runs'],
            calc_hessian=settings['calc_hessian'],
            solvent=settings['solvent']
        )
        opt_mol.write(f'{name}_confs/c_{cid}_opt.mol')
        # Get energy.
        calculate_energy(
            name=name_,
            mol=opt_mol,
            gfn_exec=gfn_exec,
            ey_file=ey_file,
            charge=settings['charge'],
            no_unpaired_e=settings['no_unpaired_e'],
            solvent=settings['solvent']
        )
        ey = read_gfnx2xtb_eyfile(ey_file)
        if ey < low_e:
            print(
                'lowest energy conformer updated with energy: '
                f'{ey}, id: {cid}'
            )
            low_e_conf_id = cid
            low_e = ey

    # Get lowest energy conformer.
    low_e_conf = stk.BuildingBlock.init_from_file(
        f'{name}_confs/c_{low_e_conf_id}_opt.mol'
    )
    low_e_conf.write(f'{name}_confs/low_e_unopt.mol')

    # Optimize lowest energy conformer at opt_level.
    low_e_conf = optimize_conformer(
        name=name+'low_e_opt',
        mol=low_e_conf,
        gfn_exec=gfn_exec,
        opt_level=settings['final_opt_level'],
        charge=settings['charge'],
        no_unpaired_e=settings['no_unpaired_e'],
        max_runs=settings['max_runs'],
        calc_hessian=settings['calc_hessian'],
        solvent=settings['solvent']
    )
    low_e_conf.write(f'{name}_confs/low_e_opt.mol')

    # Return molecule.
    return low_e_conf
e09cb7ed755e2a94075c2206ebd5d7600d633657
3,638,139
def dictize_params(params):
    """
    Parse parameters into a normal dictionary

    Args:
        params: any mapping exposing ``items()`` (e.g. a request
            parameters object).

    Returns:
        dict: a plain dict with the same key/value pairs.
    """
    # ``iteritems()`` is Python 2 only and raises AttributeError on
    # Python 3 dicts; ``items()`` works on both.
    return {key: value for key, value in params.items()}
4847815622b0855b1056361bff7f7ee02fe6d97a
3,638,140
def dot(x, y, sparse=False):
    """Wrapper for tf.matmul (sparse vs dense)."""
    if sparse:
        return tf.sparse_tensor_dense_matmul(x, y)
    return tf.matmul(x, y)
fd904bbbaf09ea3207ed8dfa47a17f30ed640ff7
3,638,141
def get_python3_status(classifiers):
    """
    Search through list of classifiers for a Python 3 classifier.

    Args:
        classifiers: iterable of trove classifier strings.

    Returns:
        bool: True if any classifier starts with
        'Programming Language :: Python :: 3', False otherwise.
    """
    # any() short-circuits on the first match instead of scanning the whole
    # list; startswith() replaces the equivalent ``find(...) == 0`` check.
    return any(
        classifier.startswith('Programming Language :: Python :: 3')
        for classifier in classifiers
    )
b4bf347dc0bbf3e9a198baa8237f7820cbb86e0b
3,638,142
def drawBeta(s, w, size=1): """Draw beta from its distribution (Eq.9 Rasmussen 2000) using ARS Make it robust with an expanding range in case of failure""" #nd = w.shape[0] 用于多维数据 lb = 0.0 flag = True cnt = 0 while flag: xi = lb + np.logspace(-3 - cnt, 1 + cnt, 200) # update range if needed flag = False try: ars = ARS(fbeta, fbetaprima, xi=xi, lb=0.0, ub=np.inf, \ s=s, w=w) except: cnt += 1 flag = True # draw beta return ars.draw(size)
f0a7826a8411dfc224b2b161d1372984630647e8
3,638,143
def get_unique_map_to_pullback(p, p_a, p_b, z_a, z_b):
    """Find a unique map to pullback."""
    mapping = dict()
    for value in p:
        # Preimage of the value through the A-side leg (empty if absent).
        from_a = set()
        if value in p_a.keys():
            from_a = set(keys_by_value(z_a, p_a[value]))
        # Preimage through the B-side leg.
        from_b = set()
        if value in p_b.keys():
            from_b.update(keys_by_value(z_b, p_b[value]))
        # Keys appearing in both preimages map to this pullback value.
        for key in from_a & from_b:
            mapping[key] = value
    return mapping
6dfcef9e2fa531ae84641be3dd1b4b8836baa958
3,638,144
import torch
from typing import Optional


def f1_score(
        pred: torch.Tensor,
        target: torch.Tensor,
        num_classes: Optional[int] = None,
        class_reduction: str = 'micro',
) -> torch.Tensor:
    """
    Computes the F1-score (a.k.a F-measure), which is the harmonic mean of
    the precision and recall. It ranges between 1 and 0, where 1 is perfect
    and the worst value is 0.

    Args:
        pred: estimated probabilities
        target: ground-truth labels
        num_classes: number of classes
        class_reduction: method to reduce metric score over labels

            - ``'micro'``: calculate metrics globally (default)
            - ``'macro'``: calculate metrics for each label, and find their
              unweighted mean.
            - ``'weighted'``: calculate metrics for each label, and find
              their weighted mean.
            - ``'none'``: returns calculated metric per class

    Return:
        Tensor containing F1-score

    Example:

        >>> x = torch.tensor([0, 1, 2, 3])
        >>> y = torch.tensor([0, 1, 2, 2])
        >>> f1_score(x, y)
        tensor(0.7500)
    """
    # F1 is F-beta with beta == 1: precision and recall weighted equally.
    return fbeta_score(
        pred=pred,
        target=target,
        beta=1.,
        num_classes=num_classes,
        class_reduction=class_reduction,
    )
3a5e8bb2915da7aedb16266575bb099f64554c8f
3,638,145
def PyramidPoolingModule(inputs, feature_map_shape):
    """
    Build the Pyramid Pooling Module.
    """
    # One interpolation branch per pyramid level.
    branches = {
        level: InterpBlock(inputs, level, feature_map_shape)
        for level in (1, 2, 3, 6)
    }
    # Concatenate along channels: input first, then coarsest-to-finest.
    return tf.concat(
        [inputs, branches[6], branches[3], branches[2], branches[1]],
        axis=-1,
    )
4182291ff038ef89620412f2087c9a8bc23e0cd9
3,638,146
def order(ord):
    """
    `order` is decorator to order the pipeline classes.

    This decorator specifies a property named "order" on the member function
    so that the property can be used to order the member functions. It can
    be combined with the decorator `with_transforms`, which orders the
    member functions.

    ```python
    class AGoodClass:
        def __init__(self):
            self.size = 0

        @order(1)
        def first_good_member(self, new):
            return "first good member"

        @order(2)
        def second_good_member(self, new):
            return "second good member"
    ```
    """
    # Delegate to the generic attribute-attaching decorator.
    return attributes(order=ord)
7cba4dda9a844733e45698257f206aeb22e2e6b2
3,638,147
import math


def collisionIndicator(egoPose, egoPoly, objPose, objPoly):
    """
    Indicator function for collision between ego vehicle and moving object
    Param:
        egoPose: ego vehicle
        objPose: pose of object
    Return:
        col_indicator: (float) collision indicator between two object
    """
    mean_diff = np.array([egoPose.x_m - objPose.x_m,
                          egoPose.y_m - objPose.y_m])
    cov_sum = egoPose.covUtm + objPose.covUtm
    heading_diff = abs(egoPose.yaw_rad - objPose.yaw_rad)

    # Near-parallel or near-orthogonal headings allow the cheaper
    # orthogonal Minkowski-sum computation.
    if abs(math.remainder(heading_diff, np.pi/2)) < param._COLLISION_ORTHO_THRES:
        poly, bound = gaussian.minkowskiSumOrthogonal(egoPoly, objPoly)
        return collisionIndicatorComputeSimple(bound, mean_diff, cov_sum)

    # General case.
    poly, bound = gaussian.minkowskiSum(egoPoly, objPoly)
    return collisionIndicatorCompute(
        poly=poly, bound=bound, dMean=mean_diff, dCov=cov_sum)
074852f5a12cd18c1b201ba4d72e2f710c21417c
3,638,148
def lookup_listener(param):
    """
    Flags a method as a @lookup_listener. This method will be updated on the
    changes to the lookup. The lookup changes when values are registered in
    the lookup or during service activation.
    @param param: function being attached to
    @return:
    """
    def decor(func):
        # Accumulate params across stacked @lookup_listener decorators.
        existing = getattr(func, "lookup_decor", None)
        if existing is None:
            func.lookup_decor = [param]
        else:
            existing.append(param)
        return func
    return decor
5d053e20ca8c2316aa46f27809b8e0ae59077d32
3,638,149
async def read_multi_analog_inputs(app, addr):
    """
    Execute a single request using `ReadPropertyMultipleRequest`.

    This will read the present value of the first 10 analog input objects
    (instance numbers 0-9) from the remote device in one request.

    :param app: An app instance
    :param addr: The network address of the remote device
    :return: the awaited response from ``app.execute_request``
    """
    read_access_specs = []
    for i in range(10):
        # One spec per analogInput object, requesting only presentValue.
        read_access_specs.append(
            ReadAccessSpecification(
                objectIdentifier=('analogInput', i),
                listOfPropertyReferences=[PropertyReference(propertyIdentifier='presentValue')],
            )
        )
    return await app.execute_request(
        ReadPropertyMultipleRequest(
            listOfReadAccessSpecs=read_access_specs,
            destination=Address(addr)
        ),
    )
9b2d6f820bcb6fc0ee9ef4ad65c7c7654e4a47b1
3,638,150
def lookupBlock(blockName):
    """
    Look up block name string in name list

    data value (e.g. color) override may be appended to the end
    e.g. stained_hardened_clay_10

    Note: block name lookup is case insensitive
    """
    # Table keys are upper-case, so normalise first (case-insensitive
    # lookup). NOTE: this module uses Python 2 syntax (print statement).
    blockName = blockName.upper()
    try:
        try:
            # Try to peel a trailing "_<data>" override off the name.
            name, data = blockName.rsplit('_', 1)
        except ValueError:
            # No underscore at all: plain name lookup.
            return Blocks[blockName]
        else:
            try:
                data = int(data)
            except ValueError:
                # Suffix was not numeric, so the whole string is the name.
                return Blocks[blockName]
            # Numeric suffix: same block id with a data-value override.
            return Block(Blocks[name].id, data)
    except KeyError:
        # NOTE(review): exiting the whole process from a lookup helper is
        # harsh -- callers cannot recover from an unknown name.
        print 'Invalid block name:', blockName
        sys.exit()
f1cd8a43751df8bf710a1d0379887725c5a5e400
3,638,151
def factorize(values, sort=False, na_sentinel=-1, size_hint=None):
    """Encode the input values as integer labels

    Parameters
    ----------
    values: Series, Index, or CuPy array
        The data to be factorized.
    sort : bool, default False
        Not supported; raises NotImplementedError when True.
    na_sentinel : number, default -1
        Value to indicate missing category.
    size_hint : int, optional
        Ignored (kept for pandas API compatibility); a warning is emitted.

    Returns
    --------
    (labels, cats) : (cupy.ndarray, cupy.ndarray or Index)
        - *labels* contains the encoded values
        - *cats* contains the categories in order that the N-th
            item corresponds to the (N-1) code.

    Examples
    --------
    >>> import cudf
    >>> data = cudf.Series(['a', 'c', 'c'])
    >>> codes, uniques = cudf.factorize(data)
    >>> codes
    array([0, 1, 1], dtype=int8)
    >>> uniques
    StringIndex(['a' 'c'], dtype='object')

    See Also
    --------
    cudf.Series.factorize : Encode the input values of Series.

    """
    if sort:
        raise NotImplementedError(
            "Sorting not yet supported during factorization."
        )
    if na_sentinel is None:
        raise NotImplementedError("na_sentinel can not be None.")

    if size_hint:
        warn("size_hint is not applicable for cudf.factorize")

    # Remember whether the caller passed a CuPy array so the categories can
    # be returned in the matching container type.
    return_cupy_array = isinstance(values, cp.ndarray)

    values = Series(values)

    # Unique non-null values form the category set, cast back to the
    # input's dtype.
    cats = values._column.dropna().unique().astype(values.dtype)

    name = values.name  # label_encoding mutates self.name
    labels = values._label_encoding(cats=cats, na_sentinel=na_sentinel).values
    values.name = name

    return labels, cats.values if return_cupy_array else Index(cats)
be4001dc9873ace10dce9100f7375718db783b24
3,638,152
def make_proc(code, variables, path, *, use_async=False):  # pylint: disable=redefined-builtin
    """Compile this code block to a procedure.

    Args:
        code: the code block to execute. Text, will be indented.
        variables: variable names to pass into the code
        path: the location where the code is stored
        use_async: False if sync code, True if async, None if in thread
    Returns:
        the procedure to call. All keyval arguments will be in the local
        dict.
    """
    # Wrap the text in a (possibly async) function definition taking the
    # given variables, indenting the original body by four spaces.
    prefix = "async " if use_async else ""
    header = prefix + f"def _proc({ ','.join(variables) }):\n    "
    source = header + code.replace("\n", "\n    ")
    compiled = compile(source, str(path), "exec")
    return partial(_call_proc, compiled, variables)
7f4e6f279ede515f71dbee6c3a5a796ee76815d2
3,638,153
def ratings_std(df):
    """calculate standard deviation of ratings from the given dataframe

    parameters
    ----------
    df (pandas dataframe): a dataframe that contains all ratings

    Returns
    -------
    standard deviation(float): standard deviation of ratings, keep 4 decimal
    """
    # Sample standard deviation of the 'ratings' column, rounded to 4 dp.
    return round(df['ratings'].std(), 4)
b1bf00d25c0cee91632eef8248d5e53236dd4526
3,638,154
def save(objct, fileoutput, binary=True):
    """
    Save 3D object to file. (same as `write()`).

    Possible extensions are:
        - vtk, vti, ply, obj, stl, byu, vtp, xyz, tif, vti, mhd, png, bmp.
    """
    # Thin alias kept for API symmetry; all the work happens in write().
    return write(objct, fileoutput, binary)
c310e5be80bcfb56f2c6bd9f9734f171a0fbd2c6
3,638,155
def read_pfm(fname):
    """
    Load a pfm file as a numpy array

    Args:
        fname: path to the file to be loaded

    Returns:
        (data, scale) tuple:
            data: content of the file as a numpy array, shape (h, w, 3)
                for colour ('PF') files, (h, w) for greyscale ('Pf') files.
            scale: absolute value of the scale factor from the header.

    Raises:
        ValueError: if the file is not a PFM file or the header is
            malformed.
    """
    # Context manager guarantees the handle is closed (the old version
    # leaked it) and avoids shadowing the ``file`` builtin.
    with open(fname, 'rb') as fh:
        header = fh.readline().rstrip()
        if header == b'PF':
            color = True
        elif header == b'Pf':
            color = False
        else:
            # Decode for the message: the old bytes + str concatenation
            # raised a TypeError instead of the intended error.
            raise ValueError(
                'Not a PFM file! header: ' + header.decode('ascii', 'replace'))

        dims = fh.readline()
        try:
            width, height = map(int, dims.split())
        except ValueError:
            # Narrowed from a bare ``except``.
            raise ValueError('Malformed PFM header.')

        scale = float(fh.readline().rstrip())
        if scale < 0:
            endian = '<'  # little-endian
            scale = -scale
        else:
            endian = '>'  # big-endian

        data = np.fromfile(fh, endian + 'f')

    shape = (height, width, 3) if color else (height, width)
    # PFM stores rows bottom-to-top; flip to conventional top-to-bottom.
    data = np.flipud(np.reshape(data, shape))
    return data, scale
3c1f90965479cd1fdaaecbd2c740d807d952f687
3,638,156
import math


def broaden_spectrum(spect, sigma):
    """Gaussian-broaden a single peak and return plottable x/y data.

    Args:
    ----
        spect (sequence) -- (position, intensity) of the peak to broaden.
        sigma (float) -- gaussian broadening width.

    Returns:
    --------
        plot_vals (list) -- [x, y]: x is a 10000-point numpy grid spanning
        position +/- 50, y is a list of broadened intensities.
    """
    center = spect[0]
    intensity = spect[1]
    grid = np.linspace(start=center - 50, stop=center + 50, num=10000)
    profile = [intensity * math.exp(-0.5 * (((pt - center) ** 2) / sigma ** 2))
               for pt in grid]
    return [grid, profile]
9c177dfaf282ad16b9742f96bb70e971a2fce6c6
3,638,157
import requests


def mv_audio(serial_id, audio_setting):
    """
    This function will change the audio recording settings to {audio_setting} in the meraki
    dashboard for the mv camera with the {serial_id}

    :param: serial_id: the serial id for the meraki mv camera
    :param: audio_setting: 'true' to turn on audio recording, 'false' to turn off audio recording
    :return: api response status code
    """
    url = f"https://api.meraki.com/api/v1/devices/{serial_id}/camera/qualityAndRetention"
    # audio_setting is interpolated verbatim into the raw JSON body, so it
    # must already be the JSON literal 'true' or 'false'
    payload = f'''{{ "audioRecordingEnabled": {audio_setting} }}'''
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        # API_KEY is a module-level constant defined elsewhere
        "X-Cisco-Meraki-API-Key": API_KEY
    }
    response = requests.request('PUT', url, headers=headers, data=payload)
    # NOTE(review): HTTP 403 is "Forbidden"; printing a success message on
    # 403 looks inverted — confirm whether 200 was intended here.
    if response.status_code == 403:
        print(f'Camera-serial id:{serial_id} audio recording have been changed to: {audio_setting}')
    return response.status_code
cb6f2415d8950513760afdcfbe11993999071b73
3,638,158
from typing import List


def lsp_text_edits(changed_file: ChangedFile) -> List[TextEdit]:
    """Take a jedi `ChangedFile` and convert to list of text edits.

    Handles inserts, replaces, and deletions within a text file.

    Args:
        changed_file: jedi refactoring result for one file.

    Returns:
        LSP `TextEdit` objects, one per changed span.
    """
    # original module text; jedi keeps the parse tree private, hence the
    # protected-access suppression
    old_code = (
        changed_file._module_node.get_code()  # pylint: disable=protected-access
    )
    new_code = changed_file.get_new_code()
    # lookup from absolute character offset in old_code to its line info
    opcode_position_lookup_old = get_opcode_position_lookup(old_code)
    text_edits = []
    for opcode in get_opcodes(old_code, new_code):
        # only opcodes that actually change text produce edits
        if opcode.op in _OPCODES_CHANGE:
            start = opcode_position_lookup_old[opcode.old_start]
            end = opcode_position_lookup_old[opcode.old_end]
            # column = offset within the line containing the offset
            start_char = opcode.old_start - start.range_start
            end_char = opcode.old_end - end.range_start
            new_text = new_code[opcode.new_start : opcode.new_end]
            text_edits.append(
                TextEdit(
                    range=Range(
                        start=Position(line=start.line, character=start_char),
                        end=Position(line=end.line, character=end_char),
                    ),
                    new_text=new_text,
                )
            )
    return text_edits
9069db3931606874e0cf8a796ffe6acb88f4ad8a
3,638,159
import hashlib def _hash(file_name, hash_function=hashlib.sha256): """compute hash of file `file_name`""" with open(file_name, 'rb') as file_: return hash_function(file_.read()).hexdigest()
463d692116fbb85db9f1a537cbcaa5d2d019ba05
3,638,160
def prepare_cmf(observer='1931_2deg'):
    """Safely returns the color matching function dictionary for the
    specified observer.

    Parameters
    ----------
    observer : `str`, {'1931_2deg', '1964_10deg'}
        the observer to return

    Returns
    -------
    `dict`
        cmf dict

    Raises
    ------
    ValueError
        observer not 1931 2 degree or 1964 10 degree
    """
    key = observer.lower()
    if key == '1931_2deg':
        return prepare_cie_1931_2deg_observer()
    if key == '1964_10deg':
        return prepare_cie_1964_10deg_observer()
    raise ValueError('observer must be 1931_2deg or 1964_10deg')
82422c521fc9b55acbad6ab0fbb31a67ec5f71b9
3,638,161
def list_project_milestones(request):
    """ list project specific milestones

    Renders a summary page for one project: open milestones are listed in
    full, while on-hold / terminated / completed ones contribute counts only.

    :param request: HttpRequest; expects a `project_id` GET parameter.
    :return: rendered `list_project_milestones.html` HttpResponse.
    """
    project_id = request.GET.get('project_id')
    project = Project.objects.get(id=project_id)
    template = loader.get_template('project_management/list_project_milestones.html')

    # open milestones: queryset plus count (empty string when none exist)
    open_status = Status.objects.get(name="Open")
    if Milestone.objects.filter(project_id=project.id, status=open_status).exists():
        open_milestones = Milestone.objects.filter(project_id=project.id, status=open_status)
        open_count = Milestone.objects.filter(project_id=project.id, status=open_status).count()
    else:
        open_milestones = ""
        open_count = 0

    # remaining statuses only need their counts
    onhold_status = Status.objects.get(name="Onhold")
    if Milestone.objects.filter(project_id=project.id, status=onhold_status).exists():
        onhold_count = Milestone.objects.filter(project_id=project.id, status=onhold_status).count()
    else:
        onhold_count = 0

    terminated_status = Status.objects.get(name="Terminated")
    if Milestone.objects.filter(project_id=project.id, status=terminated_status).exists():
        terminated_count = Milestone.objects.filter(project_id=project.id, status=terminated_status).count()
    else:
        terminated_count = 0

    completed_status = Status.objects.get(name="Completed")
    if Milestone.objects.filter(project_id=project.id, status=completed_status).exists():
        completed_count = Milestone.objects.filter(project_id=project.id, status=completed_status).count()
    else:
        completed_count = 0

    context = {
        'project_id': project.id,
        'project_name': project.name,
        'open_milestones': open_milestones,
        'completed_count': completed_count,
        'onhold_count': onhold_count,
        'terminated_count': terminated_count,
        'open_count': open_count
    }
    return HttpResponse(template.render(context, request))
b7a8cc729f17f9e8640ae83d5f70ede6ce9c1ece
3,638,163
from opax import apply_updates, transform_gradients
from re import T
from typing import Tuple
from typing import Any
from typing import Union


def build_update_fn(loss_fn, *, scan_mode: bool = False):
    """Build a simple update function.

    *Note*: The output of ``loss_fn`` must be ``(loss, (aux, model))``.

    Arguments:
        loss_fn: The loss function.
        scan_mode: If true, use `(model, optimizer)` as a single argument.

    Example:

    >>> def mse_loss(model, x, y):
    ...     y_hat = model(x)
    ...     loss = jnp.mean(jnp.square(y - y_hat))
    ...     return loss, (loss, model)
    ...
    >>> update_fn = pax.utils.build_update_fn(mse_loss)
    >>> net = pax.Linear(2, 2)
    >>> optimizer = opax.adam(1e-4)(net.parameters())
    >>> x = jnp.ones((32, 2))
    >>> y = jnp.zeros((32, 2))
    >>> net, optimizer, loss = update_fn(net, optimizer, x, y)
    """

    # pylint: disable=import-outside-toplevel

    def _update_fn(model: T, optimizer: O, *inputs, **kwinputs) -> Tuple[T, O, Any]:
        """An update function.

        Note that: ``model`` and ``optimizer`` have internal states.
        We have to return them in the output as jax transformations
        (e.g., ``jax.grad`` and ``jax.jit``) requires pure functions.

        Arguments:
            model: a callable pax.Module
            optimizer: an optimizer Module
            inputs: input batch.

        Returns:
            model: the updated model
            optimizer: the updated optimizer
            aux: the aux info.
        """
        assert isinstance(model, Module)
        assert isinstance(optimizer, Module)
        # remember the pytree structure so a malformed loss_fn (one that does
        # not return the model in its aux output) is detected below
        model_treedef = jax.tree_structure(model)
        grads, (aux, model) = grad(loss_fn, has_aux=True)(model, *inputs, **kwinputs)
        if jax.tree_structure(model) != model_treedef:
            raise ValueError("Expecting an updated model in the auxiliary output.")
        # optimizer step on trainable parameters only, then write them back
        params = select_parameters(model)
        updates, optimizer = transform_gradients(grads, optimizer, params=params)
        params = apply_updates(params, updates=updates)
        model = update_parameters(model, params=params)
        return model, optimizer, aux

    def _update_fn_scan(
        model_and_optimizer: Union[C, Tuple[T, O]], *inputs, **kwinputs
    ) -> Tuple[C, Any]:
        # scan-friendly variant: carry (model, optimizer) as a single value
        model, optimizer = model_and_optimizer
        model, optimizer, aux = _update_fn(model, optimizer, *inputs, **kwinputs)
        return (model, optimizer), aux

    return _update_fn_scan if scan_mode else _update_fn
28d6ac1b38ae4cf4e9c7e1e50ffe89d87437e01c
3,638,164
def _get_perf_hint(hint, index: int, _default=None): """ Extracts a "performance hint" value -- specified as either a scalar or 2-tuple -- for either the left or right Dataset in a merge. Parameters ---------- hint : scalar or 2-tuple of scalars, optional index : int Indicates whether the hint value is being extracted for the left or right Dataset. 0 = left, 1 = right. _default : optional Optional default value, returned if `hint` is None. Returns ------- Any The extracted performance hint value. """ if hint is None: return _default elif isinstance(hint, tuple): return hint[index] else: return hint
d67a70d526934dedaa9f571970e27695404350f2
3,638,165
def synchronized_limit(lock):
    """
    Synchronization decorator; provide thread-safe locking on a function
    http://code.activestate.com/recipes/465057/
    """
    def wrap(f):
        def synchronize(*args, **kw):
            # `lock` is a [threading-lock, waiter-count] pair; refuse new
            # callers once ten are already queued on the lock
            if lock[1] >= 10:
                raise Exception('Too busy')
            lock[1] += 1
            lock[0].acquire()
            try:
                return f(*args, **kw)
            finally:
                lock[1] -= 1
                lock[0].release()
        return synchronize
    return wrap
a28adfca434b7feaa5aa33c2ba4d1ed2e48cf916
3,638,166
from scipy.ndimage import affine_transform


def fast_warp(img, tf, output_shape=(50, 50), mode='constant', order=1):
    """
    This wrapper function is faster than skimage.transform.warp

    Args:
        img: colour image array with 3 channels in the last axis.
        tf: skimage-style transform; its private `_matrix` supplies the
            2x2 linear block and the translation offsets.
        output_shape: (rows, cols) of the warped output.
        mode: out-of-bounds fill behaviour passed to affine_transform.
        order: spline interpolation order.

    Returns:
        Warped image of shape output_shape + (3,), dtype `floatX`
        (a module-level dtype constant defined elsewhere).
    """
    m = tf._matrix
    res = np.zeros(shape=(output_shape[0], output_shape[1], 3), dtype=floatX)
    # split the homogeneous matrix into the linear part and the translation
    trans, offset = m[:2, :2], (m[0, 2], m[1, 2])
    # warp each colour channel separately (affine_transform is 2-D only);
    # the .T is presumably to reconcile skimage's (x, y) convention with
    # ndimage's (row, col) — confirm against the transform's construction
    res[:, :, 0] = affine_transform(img[:, :, 0].T, trans, offset=offset,
                                    output_shape=output_shape, mode=mode, order=order)
    res[:, :, 1] = affine_transform(img[:, :, 1].T, trans, offset=offset,
                                    output_shape=output_shape, mode=mode, order=order)
    res[:, :, 2] = affine_transform(img[:, :, 2].T, trans, offset=offset,
                                    output_shape=output_shape, mode=mode, order=order)
    return res
989b6edc370b7ab92685741f70d8346717d60505
3,638,167
def _get_detections(generator, model, score_threshold=0.05, max_detections=400, save_path=None):
    """ Get the detections from the model using the generator.

    The result is a list of lists such that the size is:
        all_detections[num_images][num_classes] = detections[num_detections, 4 + num_classes]

    # Arguments
        generator       : The generator used to run images through the model.
        model           : The model to run on the images.
        score_threshold : The score confidence threshold to use.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save the images with visualized detections to.
    # Returns
        A list of lists containing the detections for each image in the generator.
    """
    # NOTE(review): the inner comprehension *filters* on has_label(i), so the
    # inner list may be shorter than num_classes while it is later indexed by
    # raw label id — confirm has_label() holds for every id 0..num_classes-1.
    all_detections = [[None for i in range(generator.num_classes()) if generator.has_label(i)] for j in range(generator.size())]

    for i in progressbar.progressbar(range(generator.size()), prefix='Running network: '):
        raw_image = generator.load_image(i)
        image = generator.preprocess_image(raw_image.copy())
        image, scale = generator.resize_image(image)

        if keras.backend.image_data_format() == 'channels_first':
            image = image.transpose((2, 0, 1))

        # run network
        boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3]

        # correct boxes for image scale
        boxes /= scale

        # select indices which have a score above the threshold
        indices = np.where(scores[0, :] > score_threshold)[0]

        # select those scores
        scores = scores[0][indices]

        # find the order with which to sort the scores
        scores_sort = np.argsort(-scores)[:max_detections]

        # select detections
        image_boxes = boxes[0, indices[scores_sort], :]
        image_scores = scores[scores_sort]
        image_labels = labels[0, indices[scores_sort]]
        image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)

        # if save_path is not None:
        #     draw_annotations(raw_image, generator.load_annotations(i), label_to_name=generator.label_to_name)
        #     draw_detections(raw_image, image_boxes, image_scores, image_labels, label_to_name=generator.label_to_name)
        #
        #     cv2.imwrite(os.path.join(save_path, '{}.png'.format(i)), raw_image)

        # copy detections to all_detections
        for label in range(generator.num_classes()):
            if not generator.has_label(label):
                continue

            all_detections[i][label] = image_detections[image_detections[:, -1] == label, :-1]

    return all_detections
1800bba86a21c07356fd0075d8d911b9ec55540b
3,638,168
def BF (mu, s2, noise_var=None, pps=None):
    """ Buzzi-Ferraris et al.'s design criterion.

    - Buzzi-Ferraris and Forzatti (1983) Sequential experimental design for
      model discrimination in the case of multiple responses.
      Chem. Eng. Sci. 39(1):81-85
    - Buzzi-Ferraris et al. (1984) Sequential experimental design for model
      discrimination in the case of multiple responses.
      Chem. Eng. Sci. 39(1):81-85
    - Buzzi-Ferraris et al. (1990) An improved version of sequential design
      criterion for discrimination among rival multiresponse models.
      Chem. Eng. Sci. 45(2):477-481

    Shapes are normalised by the module's `_reshape` helper; from the
    indexing below, mu is indexed as [point, model, :] and s2 as
    [point, model, :, :] over n candidate points and M models — confirm
    against `_reshape`. Returns a length-n criterion array.
    """
    mu, s2, noise_var, _, n, M, _ = _reshape(mu, s2, noise_var, None)
    # fold measurement noise into every model's predictive covariance
    s2 += noise_var
    dc = np.zeros(n)
    # accumulate pairwise discrimination terms over all model pairs (i, j)
    for i in range(M-1):
        for j in range(i+1,M):
            iSij = np.linalg.inv(s2[:,i] + s2[:,j])
            # trace term: noise relative to the combined covariance
            t1 = np.trace( np.matmul(noise_var, iSij), axis1=1, axis2=2 )
            # squared (Mahalanobis-weighted) distance between predictions
            r1 = np.expand_dims(mu[:,i] - mu[:,j],2)
            t2 = np.sum( r1 * np.matmul(iSij, r1), axis=(1,2) )
            dc += t1 + t2
    return dc
f51e7c2a6827e1a1283afc8d0b8af5e3fe66a034
3,638,169
def deploy(usr, pwd, path=getcwd(), venv=None):
    """release on `pypi.org`

    :param usr: pypi.org username
    :param pwd: pypi.org password
    :param path: project directory containing `dist/`.
        NOTE(review): the default is evaluated once at import time, not per
        call — confirm that is intended.
    :param venv: virtual environment to run twine in
    :return: result of the `twine upload` invocation (see `module()`)
    """
    log(INFO, ICONS["deploy"] + 'deploy release on `pypi.org`')
    # check dist
    module('twine', 'check --strict dist/*', path=path, venv=venv)
    # push to pypi.org
    return module("twine", "upload -u %s -p %s dist/*" % (usr, pwd), level=LEVEL, path=path, venv=venv)
cfb13ac61addfc783a0f6e5963fb44838142f77d
3,638,170
def most_interval_scheduling(interval_list):
    """
    Maximum interval scheduling: greedily keep intervals with the smallest
    'end' value, skipping any interval overlapping the last kept one.

    Args:
        interval_list(list): list of interval dicts with 'start'/'end' keys

    Returns:
        scheduling_list(list): maximal list of pairwise non-overlapping
        intervals (empty for empty input)
    """
    # guard: the original indexed [0] and crashed on an empty list
    if not interval_list:
        return []
    scheduling_list = []
    # sorting by end time makes the greedy choice optimal
    for interval in sorted(interval_list, key=lambda x: x['end']):
        # keep an interval iff it starts at/after the last kept end
        if not scheduling_list or scheduling_list[-1]['end'] <= interval['start']:
            scheduling_list.append(interval)
    return scheduling_list
82b1d051221043025497c95d9657245b5b507bde
3,638,171
def kernel_program(inputfile, dimData, Materials, dict_nset_data,
                   dict_elset_matID={}, dict_elset_dload={}):
    """The kernel_program should be called by the job script (e.g., Job-1.py)
    where the user defines:
    - inputfile: the name of the input file
    - dimData: the dimensional data (see class dimension_data)
    - Materials: the list of materials used in the analysis
      (see package Elements)
    - dict_nset_data: the dictionary of nset_data (for bcds and concentrated
      loads) where the keys are nset names read from inputfile and values are
      nset_data as defined in the class nset_data
    and optionally:
    - dict_elset_matID: a dictionary where each key is an elset name defined
      in inputfile, and its value is the corresponding index of material in
      the Materials list for elements in this elset. This dictionary needs to
      be defined when multiple materials/material sections are present in the
      model
    - dict_elset_dload: a dictionary where each key is an elset name defined
      in inputfile, and its value is the corresponding dload_data (see class
      dload_data) for all elements in this elset, meaning that these elements
      are subjected to the distributed loading defined by this dload_data.
      This is needed when distributed loading is present in the model

    NOTE(review): the mutable default arguments ({}) are shared across calls;
    safe only while callers never mutate them — confirm.

    Returns [parts, nodes, elem_lists, f, a, RF]: the parsed parts, node
    list, element lists, external force vector, dof vector and reactions.
    """
    ###########################################################################
    # Preprocessing
    ###########################################################################
    # Read data from Abaqus input file and form abaqus parts
    parts = read_abaqus.read_parts_from_inputfile(inputfile)

    # check if there is only one part
    # in the future, consider making a loop over all parts
    if(not len(parts)==1):
        raise ValueError('Only a single part is supported!')

    # verification of dimensional parameters before proceeding
    verify_dimensional_parameters(parts[0], dimData)

    # form lists of nodes and elem_lists (eltype and elem indices of this type)
    nodes = form_nodes(parts[0])
    elem_lists = form_elem_lists(parts[0], dimData.NDOF_NODE, dimData.ELEM_TYPES,
                                 dict_elset_matID)

    # form lists of bcds and cloads
    [bcd_dofs, bcd_values, cload_dofs, cload_values] = \
        form_bcds_cloads(parts[0], dict_nset_data, dimData.NDOF_NODE)

    # form lists of elset for distributed loads
    list_dload_data = form_list_dload_data(parts[0], dict_elset_dload)

    ###########################################################################
    # Assembler
    # obtain the full stiffness matrix K and external distributed force vector f
    ###########################################################################
    # form the list of all the elems for assembly
    elems = []
    for elist in elem_lists:
        # verify material type before assembly
        for elem in elist.elems:
            elem.verify_material(Materials[elem.matID])
        elems.extend(elist.elems)

    # call assembler
    [K, f] = assembler(nodes, elems, dimData.NDOF_NODE, Materials, list_dload_data)

    ###########################################################################
    # Solver
    # modify the stiffness matrix and force vector based on applied bcds and loads
    # obtain dof vector a and reaction force vector RF, both size ndof by 1
    ###########################################################################
    [a, RF] = solver(K, f, bcd_dofs, bcd_values, cload_dofs, cload_values)

    return [parts, nodes, elem_lists, f, a, RF]
ac65f5c1355ed4097018ded03a9484cf77e7bf17
3,638,172
from typing import Collection


def rmse_metric(predicted: Collection, actual: Collection) -> float:
    """
    Root-mean-square error metric.

    Args:
        predicted (list): prediction values.
        actual (list): reference values.

    Returns:
        root-mean-square-error metric.
    """
    residuals = np.subtract(predicted, actual)
    return np.sqrt(np.mean(residuals ** 2))
87b14ae0c99db10ffaa8a352d7deb6007ba1f00e
3,638,174
import json


def set_user_data(session_id, user_data):
    """
    Temporarily store data POSTed by a user device; the page's ajax polling
    mechanism later picks it up. The redis entry expires after 30 seconds.

    Returns the session id on success, otherwise None.
    """
    stored = red.set(K_USER_DATA.format(session_id), json.dumps(user_data), ex=30)
    if stored:
        return session_id
    return None
c3230dd751d2b34f8abb142be5dc170c639258b5
3,638,175
def find_keys(info: dict) -> dict:
    """Determines all the keys and their parent keys.

    Walks nested dicts (directly or inside lists) and records, for every
    key, the prefix of its parent key ('<parent>[].', or '' at top level).
    Only the first parent encountered for a given key name is kept.

    Args:
        info: the (possibly nested) dictionary to inspect.

    Returns:
        dict mapping each key name to its parent prefix.
    """
    avail_keys: dict = {}

    def _walk(dct: dict, prefix: str) -> None:
        for key, value in dct.items():
            # setdefault keeps the first parent seen for a repeated key
            avail_keys.setdefault(key, prefix)
            # isinstance (rather than type() ==) also accepts subclasses
            if isinstance(value, dict):
                _walk(value, key + '[].')
            elif isinstance(value, list):
                for item in value:
                    if isinstance(item, dict):
                        _walk(item, key + '[].')

    _walk(info, '')
    return avail_keys
8d0bed361767d62bbc3544efdfe47e8e1065f462
3,638,176
def valid(exc, cur1, cur2=None, exclude=None, exclude_cur=None):
    """
    Decide whether exchange *exc* satisfies currency 1 (and currency 2 when
    given), while not being *exclude* and not involving *exclude_cur*.
    """
    if exclude is not None and exc == exclude:
        return False
    currencies = (exc.to_currency, exc.from_currency)
    if exclude_cur is not None and exclude_cur in currencies:
        return False
    if cur2 is None:
        return cur1 in currencies
    return cur1 in currencies and cur2 in currencies
84a37e669fee120aed8fbc57ab13d5f70f583cf4
3,638,177
import math


def arc(
    x: float,
    y: float,
    radius: float,
    start: float,
    stop: float,
    quantization: float = 0.1,
) -> np.ndarray:
    """Build a circular arc path.

    Zero angles refer to east of unit circle and positive values extend
    counter-clockwise.

    Args:
        x: center X coordinate
        y: center Y coordinate
        radius: circle radius
        start: start angle (degree)
        stop: stop angle (degree)
        quantization: maximum length of linear segment

    Returns:
        arc path as an array of complex points

    Raises:
        ValueError: if start and stop normalize to the same angle
    """

    def _wrap(deg):
        # bring the angle into (0, 360]; exactly 360 is left untouched
        while deg > 360:
            deg -= 360
        while deg < 0:
            deg += 360
        return deg

    begin = _wrap(start)
    end = _wrap(stop)
    if end < begin:
        end += 360
    elif begin == end:
        raise ValueError("start and stop angles must have different values")

    # enough segments that each chord stays under `quantization` long
    segments = math.ceil((end - begin) / 180 * math.pi * radius / quantization)
    theta = np.linspace(begin, end, segments)
    theta[theta == 360] = 0
    theta *= math.pi / 180
    return radius * (np.cos(-theta) + 1j * np.sin(-theta)) + complex(x, y)
99ce50042e4199c38fdb0a6e79134fab0cd30196
3,638,178
def build_feature_columns(schema): """Build feature columns as input to the model.""" # non-numeric columns exclude = ['customer_id', 'brand', 'promo_sensitive', 'weight', 'label'] # numeric feature columns numeric_column_names = [col for col in schema.names if col not in exclude] numeric_columns = [ tf.feature_column.numeric_column(col) for col in numeric_column_names ] # identity column identity_column = tf.feature_column.categorical_column_with_identity( key='promo_sensitive', num_buckets=2) # DNNClassifier only accepts dense columns indicator_column = tf.feature_column.indicator_column(identity_column) # numeric weight column weight_column = tf.feature_column.numeric_column('weight') feature_columns = numeric_columns + [indicator_column] return feature_columns, weight_column
048ebe72e291db6a92ee9d2d903baf6e10df9bf2
3,638,179
def get_mol_func(smiles_type):
    """
    Returns a function pointer that converts a given SMILES type to a mol object.
    :param smiles_type: The SMILES type to convert VALUES=(deepsmiles.*, smiles, scaffold).
    :return : A function pointer.
    """
    # plain SMILES / scaffold: direct conversion
    if not smiles_type.startswith("deepsmiles"):
        return to_mol
    # deepsmiles.<variant>: decode to SMILES first, then convert
    _, converter_type = smiles_type.split(".")
    return lambda deepsmi: to_mol(from_deepsmiles(deepsmi, converter=converter_type))
7af4260cb79c21e763ee2ad4a64c38b5e3fd84fe
3,638,180
def index():
    """Return to the homepage."""
    # render the static landing page template
    return render_template("index.html")
6b3a8595173d8919478ae4a0f4dc7f8a3958af56
3,638,181
def species_thermo_value(spc_dct):
    """Return the species enthalpy at 298 K.

    :param spc_dct: species dictionary containing an 'H298' entry
    :return: the value stored under 'H298'
    """
    return spc_dct['H298']
7684b0ace0fa9717cb1cc3ea83bb6be8099c4bf6
3,638,182
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Flood integration."""
    # ensure a per-domain storage dict exists without clobbering prior data
    hass.data.setdefault(DOMAIN, {})
    return True
5d404d856346e26f8c7f32102c6286f05ac91f8c
3,638,184
def typehint(x, typedict):
    """Replace the dtypes in `x` keyed by `typedict` with the dtypes in
    `typedict`.

    :param x: structured numpy array whose fields are to be re-typed.
    :param typedict: mapping of field name -> replacement dtype.
    :return: `x` cast to the merged dtype, with field order preserved
        via `sort_dtype_items` (a helper defined elsewhere).
    """
    dtype = x.dtype
    # map each field name to its dtype, dropping the byte offsets
    lhs = dict(zip(dtype.fields.keys(), map(first, dtype.fields.values())))
    # `merge` (toolz-style helper) lets typedict entries win over originals
    dtype_list = list(merge(lhs, typedict).items())
    return x.astype(np.dtype(sort_dtype_items(dtype_list, dtype.names)))
a5e8ab9e94f3d622467a3e486bc21ffed1336878
3,638,185
import random


def balance(samples, labels, balance_factor, adjust_func):
    """create a balanced dataset by subsampling classes or generating new samples

    :param samples: sequence of data points.
    :param labels: class labels aligned with `samples`.
    :param balance_factor: if <= 1.0, the target class size is that fraction
        of the largest class; otherwise it is an absolute class size.
    :param adjust_func: callable applied to a randomly chosen member to
        create a new (augmented) sample when a class must grow.
    :return: (samples, labels) as two tuples, with every class resized to
        the target size.
    """
    grouped = group_by_label(samples, labels)
    if balance_factor <= 1.0:
        largest_group_size = max([len(x[1]) for x in grouped])
        target_group_size = int(largest_group_size * balance_factor)
    else:
        target_group_size = int(balance_factor)
    grouped_balanced = []
    for label, group in grouped:
        if len(group) > target_group_size:
            # shrink oversized classes by sampling without replacement
            group_resized = random.sample(group, target_group_size)
        else:
            # grow undersized classes with adjusted copies of random members
            group_resized = [x for x in group]
            while len(group_resized) < target_group_size:
                group_resized.append(adjust_func(random.choice(group)))
        grouped_balanced.append((label, group_resized))
    # flatten back to parallel (sample, label) sequences
    pairs = [(y, x[0]) for x in grouped_balanced for y in x[1]]
    return zip(*pairs)
128aaec1b3c60348190394c5ceab6b561eba6f51
3,638,186
import random


def generate_sha1(string, salt=None):
    """
    Generates a sha1 hash for supplied string. Doesn't need to be very
    secure because it's not used for password checking. We got Django
    for that.

    :param string: The string that needs to be encrypted.
    :param salt: Optionally define your own salt. If none is supplied,
        will use a random string of 5 characters.
    :return: Tuple containing the salt and hash.
    """
    # NOTE(review): sha_constructor is defined elsewhere and is assumed to
    # accept str input (i.e. it handles encoding itself) — confirm.
    if not salt:
        salt = sha_constructor(str(random.random())).hexdigest()[:5]
    # renamed from `hash`, which shadowed the builtin of the same name
    digest = sha_constructor(salt + str(string)).hexdigest()
    return (salt, digest)
9ff8cbfd987972fea6712f90752c9d94aeb78b44
3,638,187
def sentences_from_doc(ttree_doc, language, selector):
    """Given a Treex document, return a list of sentences in the given
    language and selector."""
    sentences = []
    for bundle in ttree_doc.bundles:
        zone = bundle.get_zone(language, selector)
        sentences.append(zone.sentence)
    return sentences
d9c09249171d5d778981fb98a8a7f53765518479
3,638,188
import math


def demo_func(par):
    """Test function to optimize.

    Expects keys 'x', 'y', 'z', 'p' (numbers) and 'str' ('sin' or 'cos').
    """
    trig = {'sin': math.sin, 'cos': math.cos}[par['str']]
    numerator = par['x'] - par['y'] * par['z']
    return numerator / (trig(par['p']) ** 2 + 1)
5899be5709c4a6ecf09cf9852c1b7569d85616b3
3,638,189
def file_to_list(filename):
    """
    Read a one-column text file into a list.

    :param filename: path of the file to read
    :return: a list with one whitespace-stripped element per line
    """
    with open(filename, 'r') as handle:
        return [entry.strip() for entry in handle]
33bee263b98c4ff85d10191fa2f5a0f095c6ae4b
3,638,190
def regular_ticket_price(distance_in_km: int) -> float:
    """
    Calculate the regular ticket price for the given distance.

    Tariff source: Tarife 601 chapter 10.1.3 on
    https://www.allianceswisspass.ch/de/Themen/TarifeVorschriften
    """
    # per-km price for each distance band
    tariff_bands = {range(1, 5): 44.51,
                    range(5, 15): 42.30,
                    range(15, 49): 37.24,
                    range(49, 151): 26.46,
                    range(151, 201): 25.71,
                    range(201, 251): 22.85,
                    range(251, 301): 20.63,
                    range(301, 481): 20.09,
                    range(481, 1501): 19.85,
                    }
    price = calculate_price(distance_in_km, tariff_bands)
    # never charge less than the minimum fare
    return max(price, MINDESTPRICE_IN_CHF)
220b4eb9362b9a48b5c3d2e2ad17e23755098785
3,638,191
def find_cal_indices(datetimes):
    """
    Identify the start of each calibration event.

    Cal events are any time a standard is injected and being quantified by
    the system; any point more than 60s after the previous one is treated
    as the beginning of a new event.

    :param datetimes: pandas Series of timestamps for all supplied data
    :return: list of index labels where a new cal event begins
    """
    gaps = datetimes.diff()
    new_event_mask = gaps > pd.Timedelta(seconds=60)
    return gaps.loc[new_event_mask].index.values.tolist()
2e823e5ffc5fb509639a2d5746bd26af77a650ae
3,638,192
def rename(isamAppliance, id, new_name, check_mode=False, force=False):
    """
    Rename a Password Strength

    :param isamAppliance: appliance connection object
    :param id: id of the password-strength rule to rename
    :param new_name: new name for the rule
    :param check_mode: when True, report the would-be change without applying it
    :param force: when True, skip the `_check` existence test and always
        attempt the rename
    """
    # only act when forced or when the rule actually exists
    if force is True or _check(isamAppliance, id) is True:
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True)
        else:
            return isamAppliance.invoke_put(
                "Rename a Password Strength",
                "/wga/pwd_strength/{0}".format(id),
                {
                    'id': id,
                    'new_name': new_name
                })

    # nothing to do: report an unchanged result
    return isamAppliance.create_return_object()
b140e618370cff1086b27fef13e0ff91b22cf075
3,638,193
import asyncio
import traceback


def reinvoke_on_edit(ctx, *additional_messages: discord.Message, timeout: float = 600) -> None:
    # noinspection PyUnresolvedReferences
    """
    Watches a given context for a given period of time. If the message that
    invoked the context is edited within the time period, then the invoking
    message plus any additional messages are deleted. The context's command
    is then reinvoked with the new message body.

    Parameters:
        ctx:
            A :class:`discord.ext.commands.Context` to listen to. Create one
            with `bot.get_context` if you are in an event instead.
        additional_messages:
            Any additional messages to also destroy on close.
        timeout:
            The timeout to wait for before the call terminates.
            NOTE(review): the signature defaults this to 600 seconds, not
            `None` as described here — confirm which is intended.

    Note:
        To invoke this on a response that is being paginated using the
        `libneko.pagination` module, you should attempt to invoke it like
        so::

            >>> factory = ...
            >>> nav = factory.build()
            >>> nav.start(ctx)
            >>> reinvoke_on_edit(ctx, *nav.all_messages)
            >>>
            >>> # or if you just have a nav
            >>>
            >>> nav = StringNavigator(...)
            >>> nav.start()
            >>> reinvoke_on_edit(ctx, *nav.all_messages)
    """
    if ctx.command is None:
        raise ValueError("Cannot reinvoke a non-valid command or non-command invocation")

    async def handle_wait_for_edit_or_close():
        try:
            # Triggered when we should kill our events.
            event = asyncio.Event()

            def set_on_exit(f):
                # wrap a coroutine so that finishing it also sets `event`,
                # which tears down the sibling watcher below
                @neko3.functional.wraps(f)
                async def wrapper():
                    r = await f()
                    event.set()
                    return r

                return wrapper

            @set_on_exit
            async def wait_for_close():
                # resolves when the invoking message is deleted or the
                # timeout elapses
                try:
                    await ctx.bot.wait_for("message_delete", check=lambda m: m.id == ctx.message.id, timeout=timeout)
                except (asyncio.CancelledError, asyncio.TimeoutError):
                    pass

            @set_on_exit
            async def wait_for_edit():
                try:
                    def predicate(before, after):
                        # NOTE(review): on exception this falls through and
                        # returns None (falsy) after printing the traceback
                        try:
                            # Only respond to this message
                            if after.id != ctx.message.id:
                                return False
                            elif before.content == after.content:
                                # Again, something went weird.
                                return False
                            elif not after.content.startswith(ctx.prefix):
                                return False
                            else:
                                # Ensure same command.
                                invoked = ctx.message.content[len(ctx.prefix) :].lstrip()
                                return invoked.startswith(ctx.invoked_with)
                        except Exception:
                            traceback.print_exc()

                    _, after = await ctx.bot.wait_for("message_edit", check=predicate)
                    new_ctx = await ctx.bot.get_context(after)
                    # delete the extra messages concurrently, then reinvoke
                    asyncio.ensure_future(asyncio.gather(*[m.delete() for m in additional_messages]), loop=ctx.bot.loop)
                    ctx.bot.loop.create_task(ctx.command.reinvoke(new_ctx))
                except asyncio.CancelledError:
                    pass
                except Exception:
                    traceback.print_exc()

            tasks = [ctx.bot.loop.create_task(wait_for_close()), ctx.bot.loop.create_task(wait_for_edit())]

            # On either of these events triggering, we kill the lot.
            await event.wait()
            for task in tasks:
                try:
                    task.cancel()
                    task.result()
                except Exception:
                    pass
        except Exception:
            traceback.print_exc()

    ctx.bot.loop.create_task(handle_wait_for_edit_or_close())
523048caadac3efc0f8065623a6448077dcb05fd
3,638,194
def gen_model_forms(form, model):
    """Creates a dict of forms.

    model_forms[0] is a blank form used for adding new model objects.
    model_forms[m.pk] is an editing form pre-populated with the fields of m.
    """
    forms = {0: form()}
    forms.update((obj.pk, form(instance=obj)) for obj in model.objects.all())
    return forms
28bf3f007a7f8f971c18980c84a7841fd116898f
3,638,195
def _state_size_with_prefix(state_size, prefix=None):
    """Helper function that enables int or TensorShape shape specification.

    This function takes a size specification, which can be an integer or a
    TensorShape, and converts it into a list of integers. One may specify
    any additional dimensions that precede the final state size
    specification.

    Args:
        state_size: TensorShape or int that specifies the size of a tensor.
        prefix: optional additional list of dimensions to prepend.

    Returns:
        result_state_size: list of dimensions the resulting tensor size.

    Raises:
        TypeError: if `prefix` is given but is not a list.
    """
    # tensor_shape.as_shape accepts ints, iterables or TensorShape objects
    result_state_size = tensor_shape.as_shape(state_size).as_list()
    if prefix is not None:
        if not isinstance(prefix, list):
            raise TypeError("prefix of _state_size_with_prefix should be a list.")
        # e.g. prepend batch/time dimensions ahead of the state shape
        result_state_size = prefix + result_state_size
    return result_state_size
7f8aaab1dd42b6470dce08f9a13a59d8cdc66f4f
3,638,196
def as_pandas(cursor, coerce_float=False):
    """Return a pandas `DataFrame` out of an impyla cursor.

    This will pull the entire result set into memory. For richer
    pandas-like functionality on distributed data sets, see the Ibis
    project.

    Parameters
    ----------
    cursor : `HiveServer2Cursor`
        The cursor object that has a result set waiting to be fetched.
    coerce_float : bool, optional
        Attempt to convert values of non-string, non-numeric objects to
        floating point.

    Returns
    -------
    DataFrame
    """
    from pandas import DataFrame  # pylint: disable=import-error

    # first element of each description entry is the column name
    column_names = [entry[0] for entry in cursor.description]
    rows = cursor.fetchall()
    return DataFrame.from_records(rows, columns=column_names,
                                  coerce_float=coerce_float)
e1a9f5ba9b589a9c94f6df1a379833d8d7176d2b
3,638,197
def check_ip(ip, network_range):
    """ Test if the IP is in range

    Range is expected to be in CIDR notation format.
    If no MASK is given /32 is used.
    It return True if the IP is in the range.
    """
    netItem = str(network_range).split('/')
    rangeIP = netItem[0]
    if len(netItem) == 2:
        rangeMask = int(netItem[1])
    else:
        # no mask supplied: treat the range as a single host
        rangeMask = 32
    try:
        # ip2int is defined elsewhere; presumably converts a dotted-quad
        # string to a 32-bit integer — confirm.
        ripInt = ip2int(rangeIP)
        ipInt = ip2int(ip)
        # the network bits (top `rangeMask` bits) must match exactly
        result = not ((ipInt ^ ripInt) & 0xFFFFFFFF << (32 - rangeMask));
    except:
        # NOTE(review): bare except hides real errors (including typos) and
        # turns them into "not in range" — consider narrowing it.
        result = False
    return result
be2bf16e4b000ff4b106761ea99bf69596d3ece2
3,638,198
async def index(_request: HttpRequest) -> HttpResponse:
    """A request handler which provides an index of the compression methods.

    Responds 200 with a static HTML page linking to the /gzip, /deflate and
    /compress endpoints.  The request object is unused.
    """
    # Static HTML body; one list item per supported compression endpoint.
    html = """
<!DOCTYPE html>
<html>
  <body>
    <ul>
      <li><a href='/gzip'>gzip</a></li>
      <li><a href='/deflate'>deflate</a></li>
      <li><a href='/compress'>compress</a></li>
    </ul>
  </body>
</html>
"""
    # Header values are bytes per the framework's (status, headers, body) API.
    return HttpResponse(
        200,
        [(b'content-type', b'text/html')],
        text_writer(html)
    )
df5af2085494f4dfe1ce22a8c6feaed71ebad5e7
3,638,199
def ifft_function(G, Fs, axis=0):
    """
    Compute the inverse DFT of a centered (fftshift-ed) spectrum.

    Arguments
    ---------
    G : array_like
        DFT (complex Fourier coefficients), centered around zero frequency.
    Fs : float
        Sample rate (= 2 * F_nyquist).
    axis : int
        Axis along which the IDFT operates.

    Returns
    -------
    t : ndarray
        Time axis, n_points samples spaced at 1/Fs.
    x : ndarray
        Time series.
    """
    G = np.atleast_2d(G)
    n_points = np.shape(G)[axis]
    # BUG FIX: the original called np.fft.ifftshift(G, None, axis) -- the
    # function only takes (x, axes), so three positional args raised a
    # TypeError -- and then returned an undefined name `x`.  Both the shift
    # and the inverse transform now operate explicitly along `axis`.
    # NOTE(review): np.fft.ifft already divides by n_points; the extra
    # division below reproduces the original normalization -- confirm it
    # matches the paired forward transform.
    x = np.fft.ifft(np.fft.ifftshift(G, axes=axis), axis=axis) / n_points
    dt = 1 / np.double(Fs)
    t = np.arange(0, dt * n_points, dt)
    return t, x
901218d6c795d0ee3163496b6889899b9be16342
3,638,200
def user_tweets_stats_grouped_new(_, group_type):
    """
    Args:
        _: Http Request (ignored in this function)
        group_type: Keyword defining group label (day, month, year)

    Returns:
        Activities grouped by (day or month or year) wrapped in a response
        object; 403 with the error message when the query fails.
    """
    # Granularity is hierarchical: "day" implies grouping by year+month+day.
    types = ["year", "month", "day"]
    depth = {"year": 0, "month": 1, "day": 2}[group_type]
    success, data, message = queries.user_tweets_stats_grouped(
        types[:depth + 1], accum=False)
    if success:
        return create_response(data=data,
                               error_messages=[],
                               success_messages=[message],
                               status=HTTP_200_OK)
    return create_response(data=data,
                           error_messages=[message],
                           success_messages=[],
                           status=HTTP_403_FORBIDDEN)
e1fc3dfd96fde3f2e01822b79bd97e508982e3d4
3,638,201
def login_required(func, *args, **kwargs):
    """
    Decorator for Controller methods that require a logged-in user.

    The wrapper receives the Controller instance and consults its
    `request.is_authenticated` flag: authenticated calls pass straight
    through to the decorated method, everything else gets an HTTP 401
    "unauthenticated" JSON response.

    :param func: the method being decorated.
    :return: the wrapped method.
    """
    def wrapper(controller_obj, *args, **kwargs):
        # Guard clause: reject unauthenticated callers before touching func.
        if not controller_obj.request.is_authenticated:
            return response.json({
                "message": "unauthenticated"
            }, status=401)
        return func(controller_obj, *args, **kwargs)
    return wrapper
3611bb87544ece2516d4a738e3ab68b58ee154f4
3,638,203
def preProcessImage(rgbImage): """ Preprocess the input RGB image @rgbImage: Input RGB Image """ # Color space conversion img_gray = cv2.cvtColor(rgbImage, cv2.COLOR_BGR2GRAY) img_hsv = cv2. cvtColor(rgbImage, cv2.COLOR_BGR2HLS) ysize, xsize = getShape(img_gray) #Detecting yellow and white colors low_yellow = np.array([20, 100, 100]) high_yellow = np.array([30, 255, 255]) mask_yellow = cv2.inRange(img_hsv, low_yellow, high_yellow) mask_white = cv2.inRange(img_gray, 200, 255) mask_yw = cv2.bitwise_or(mask_yellow, mask_white) mask_onimage = cv2.bitwise_and(img_gray, mask_yw) #Smoothing for removing noise gray_blur = cv2.GaussianBlur(mask_onimage, (5,5), 0) return gray_blur, xsize, ysize
ea70956bca99e28a6928867a40a3b579e2c8931b
3,638,204
from typing import Any
from typing import Dict
from typing import List
import json
import time


def consume_messages(consumer: Consumer, num_expected: int,
                     serialize: bool = True) -> List[Dict[str, Any]]:
    """Poll a topic until `num_expected` messages arrive or the window expires.

    Messages with errors are logged and skipped.  The consumer is closed
    before returning.
    """
    deadline = time.time() + POLL_TIMEOUT
    collected = []
    while time.time() < deadline:
        message = consumer.poll(1)
        if message is None:
            continue
        error = message.error()
        if error:
            logger.error(error)
            continue
        payload = message.value().decode("utf-8")
        collected.append(json.loads(payload) if serialize else payload)
        if len(collected) == num_expected:
            break
    consumer.close()
    return collected
5bf5db5180222d235e08a65a4e67e6daccf9c4d7
3,638,205
def get_compressed_size(data, compression, block_size=DEFAULT_BLOCK_SIZE):
    """
    Return the number of bytes required when `data` is compressed.

    Parameters
    ----------
    data : buffer
    compression : str
        The type of compression to use.
    block_size : int, optional
        Input data is fed to the encoder in blocks of this many bytes.

    Returns
    -------
    bytes : int
    """
    compression = validate(compression)
    encoder = _get_encoder(compression)
    total = 0
    for offset in range(0, len(data), block_size):
        chunk = data[offset:offset + block_size]
        total += len(encoder.compress(chunk))
    # Stream encoders buffer internally; flush to count any trailing bytes.
    if hasattr(encoder, "flush"):
        total += len(encoder.flush())
    return total
f7c72cf7097ee9f15b9aa0b1b6d46fe060cc0c15
3,638,206
def flat_command(bias=False, flat_map=False, return_shortname=False, dm_num=1):
    """
    Create a DmCommand object for a flat command.

    :param bias: Boolean flag for whether to apply a bias.
    :param flat_map: Boolean flag for whether to apply a flat_map.
    :param return_shortname: Boolean flag that will return a string that
        describes the object as the second parameter.
    :param dm_num: 1 or 2, for DM1 or DM2.
    :return: DmCommand object, and optional descriptive string (good for
        filename).
    """
    # Build the short descriptive name from the flags that are set.
    short_name = "flat"
    if flat_map:
        short_name += "_flat_map"
    if bias:
        short_name += "_bias"
    # A flat command is an all-zeros actuator grid; bias/flat_map are
    # applied inside DmCommand.
    side = CONFIG_INI.getint(config_name, 'dm_length_actuators')
    command_object = DmCommand(np.zeros((side, side)), dm_num,
                               flat_map=flat_map, bias=bias)
    if return_shortname:
        return command_object, short_name
    return command_object
7b375c4b73686f286f07b8a327f2237e3ecb9ad0
3,638,207
def plot_graph_routes(
    G,
    routes,
    bbox=None,
    fig_height=6,
    fig_width=None,
    margin=0.02,
    bgcolor="w",
    axis_off=True,
    show=True,
    save=False,
    close=True,
    file_format="png",
    filename="temp",
    dpi=300,
    annotate=False,
    node_color="#999999",
    node_size=15,
    node_alpha=1,
    node_edgecolor="none",
    node_zorder=1,
    edge_color="#999999",
    edge_linewidth=1,
    edge_alpha=1,
    use_geom=True,
    orig_dest_points=None,
    route_color="r",
    route_linewidth=4,
    route_alpha=0.5,
    orig_dest_node_alpha=0.5,
    orig_dest_node_size=100,
    orig_dest_node_color="r",
    orig_dest_point_color="b",
):
    """
    Plot several routes along a networkx spatial graph.

    Parameters
    ----------
    G : networkx.MultiDiGraph
        input graph
    routes : list
        the routes as a list of lists of nodes
    bbox : tuple
        bounding box as north,south,east,west - if None will calculate from
        spatial extents of data. if passing a bbox, you probably also want to
        pass margin=0 to constrain it.
    fig_height, fig_width : int
        matplotlib figure height/width in inches
    margin : float
        relative margin around the figure
    axis_off : bool
        if True turn off the matplotlib axis
    bgcolor : string
        the background color of the figure and axis
    show, save, close : bool
        whether to show the figure, save it as an image file, and close it
        (close only applies when show is False)
    file_format : string
        the format of the file to save (e.g., 'jpg', 'png', 'svg')
    filename : string
        the name of the file if saving
    dpi : int
        the resolution of the image file if saving
    annotate : bool
        if True, annotate the nodes in the figure
    node_color, node_size, node_alpha, node_edgecolor, node_zorder
        node styling; edges are always zorder 2, so node_zorder=1 plots
        nodes beneath them and 3 plots them on top
    edge_color, edge_linewidth, edge_alpha
        edge styling
    use_geom : bool
        if True, use the spatial geometry attribute of the edges to draw
        geographically accurate edges, rather than straight node-to-node lines
    orig_dest_points : list of tuples
        optional group of (lat, lng) points to plot instead of each route's
        origin/destination nodes
    route_color, route_linewidth, route_alpha
        route line styling
    orig_dest_node_alpha, orig_dest_node_size, orig_dest_node_color
        origin/destination node marker styling
    orig_dest_point_color : string
        marker color used when orig_dest_points are plotted instead of nodes

    Returns
    -------
    fig, ax : tuple
    """
    # Plot the base graph first (routes are overlaid afterwards); defer
    # show/save/close until the routes have been drawn.
    fig, ax = plot_graph(
        G,
        bbox=bbox,
        fig_height=fig_height,
        fig_width=fig_width,
        margin=margin,
        axis_off=axis_off,
        bgcolor=bgcolor,
        show=False,
        save=False,
        close=False,
        filename=filename,
        dpi=dpi,
        annotate=annotate,
        node_color=node_color,
        node_size=node_size,
        node_alpha=node_alpha,
        node_edgecolor=node_edgecolor,
        node_zorder=node_zorder,
        edge_color=edge_color,
        edge_linewidth=edge_linewidth,
        edge_alpha=edge_alpha,
        use_geom=use_geom,
    )

    # Collect coordinates of the origin/destination reference points.
    orig_dest_points_lats = []
    orig_dest_points_lons = []

    if orig_dest_points is None:
        # No explicit points given: use each route's first and last node.
        for route in routes:
            origin_node = route[0]
            destination_node = route[-1]
            orig_dest_points_lats.append(G.nodes[origin_node]["y"])
            orig_dest_points_lats.append(G.nodes[destination_node]["y"])
            orig_dest_points_lons.append(G.nodes[origin_node]["x"])
            orig_dest_points_lons.append(G.nodes[destination_node]["x"])
    else:
        # Use the caller's (lat, lng) points and their dedicated color.
        for point in orig_dest_points:
            orig_dest_points_lats.append(point[0])
            orig_dest_points_lons.append(point[1])
        orig_dest_node_color = orig_dest_point_color

    # Scatter the origin/destination markers above the route lines (zorder 4).
    ax.scatter(
        orig_dest_points_lons,
        orig_dest_points_lats,
        s=orig_dest_node_size,
        c=orig_dest_node_color,
        alpha=orig_dest_node_alpha,
        edgecolor=node_edgecolor,
        zorder=4,
    )

    # Build one flat list of coordinate line segments for all routes.
    lines = []
    for route in routes:
        lines.extend(_node_list_to_coordinate_lines(G, route, use_geom))

    # Add all route segments as a single LineCollection (zorder 3: above
    # edges, below the origin/destination markers).
    lc = LineCollection(
        lines, colors=route_color, linewidths=route_linewidth, alpha=route_alpha, zorder=3
    )
    ax.add_collection(lc)

    # Save and/or show the figure as requested.
    fig, ax = _save_and_show(fig, ax, save, show, close, filename, file_format, dpi, axis_off)
    return fig, ax
333479cd0924df968f66ba328735a309a10e41a9
3,638,208
import re


def _parse_book_info(html):
    """Parse Douban book info (author, press, publish date, price) from the
    raw HTML of the book-information section.

    :param html: raw HTML string of the book info block.
    :return: dict with keys 'author', 'press', 'publish_date', 'price'.
    """
    sentinel = 'END_FLAG'
    # Replace <br> separators with an explicit sentinel so each field can be
    # matched non-greedily up to the end of its own line.
    normalized = html.replace('<br>', sentinel).replace('<br/>', sentinel)
    text = lxml.html.fromstring(normalized).text_content()
    # [::] matches either the ASCII or the full-width colon after the label.
    pattern = r'{}[::](.*?){}'

    def _extract(label):
        match = re.search(pattern.format(label, sentinel), text,
                          re.I | re.DOTALL)
        return match.group(1).strip()

    return {
        'author': _extract('作者'),
        'press': _extract('出版社'),
        'publish_date': _extract('出版年'),
        'price': _extract('定价'),
    }
d327d9561a1306f1242f1f78c01517bd2358aa0b
3,638,209
def offers(request, region_slug, language_code=None):
    """
    Serialize every offer of the given region into a JSON array.

    :param request: the incoming HTTP request (unused beyond routing).
    :param region_slug: slug identifying the region whose offers to list.
    :param language_code: optional language code from the URL (unused).
    :return: JsonResponse containing a list of transformed offers.
    """
    region = Region.objects.get(slug=region_slug)
    payload = [transform_offer(offer) for offer in region.offers.all()]
    # safe=False: top-level JSON value is a list, not an object.
    return JsonResponse(payload, safe=False)
d0256abb9a1fda0fd0296dab811f3bef2091c6d3
3,638,210
def get_ellipse(mu: np.ndarray, cov: np.ndarray, draw_legend: bool = True):
    """
    Draw an ellipse centered at a given location according to the specified
    covariance matrix.

    Parameters
    ----------
    mu : ndarray of shape (2,)
        Center of ellipse
    cov : ndarray of shape (2,2)
        Covariance of Gaussian
    draw_legend : bool
        Whether the trace appears in the plot legend.

    Returns
    -------
    scatter : a plotly trace object of the ellipse
    """
    # Eigenvalues in descending order give the semi-axis lengths.
    eigenvalues = np.linalg.eigvalsh(cov)[::-1]
    l1, l2 = eigenvalues[0], eigenvalues[1]
    # Rotation angle of the principal axis; degenerate (diagonal) covariance
    # is handled separately to avoid atan2(0, 0).
    if cov[0, 1] != 0:
        theta = atan2(l1 - cov[0, 0], cov[0, 1])
    else:
        theta = np.pi / 2 if cov[0, 0] < cov[1, 1] else 0
    # Parametric ellipse, rotated by theta and shifted to mu.
    t = np.linspace(0, 2 * pi, 100)
    cos_t, sin_t = np.cos(t), np.sin(t)
    xs = l1 * np.cos(theta) * cos_t - l2 * np.sin(theta) * sin_t
    ys = l1 * np.sin(theta) * cos_t + l2 * np.cos(theta) * sin_t
    return go.Scatter(x=mu[0] + xs, y=mu[1] + ys, mode="lines",
                      marker_color="black", showlegend=draw_legend,
                      name="covariance")
639e2161819e76c485efaf22598cfcc601a10122
3,638,211
from typing import Sequence
from typing import List
import hashlib
from typing import Dict
def load_hashes(
        filename: str, hash_algorithm_names: Sequence[str]
) -> HashResult:
    """
    Compute the file size and hash hex digests for the given file.

    Args:
        filename: path of the file to hash.
        hash_algorithm_names: names accepted by `hashlib.new` (e.g. "sha256").

    Returns:
        Tuple of (total size in bytes, {algorithm name: hex digest}).
    """
    # See https://github.com/python/typeshed/issues/2928
    hashes: List['hashlib._hashlib._HASH'] = []  # type: ignore
    for name in hash_algorithm_names:
        hashes.append(hashlib.new(name))
    size = 0
    with open(filename, 'rb') as inp:
        # BUG FIX: the original read a single buffer only, so files larger
        # than _BUFFER_SIZE were hashed (and sized) incompletely.  Keep
        # reading fixed-size chunks until EOF.
        while True:
            data = inp.read(_BUFFER_SIZE)
            if not data:
                break
            for hashf in hashes:
                hashf.update(data)
            size += len(data)
    digests: Dict[str, str] = {
        hash_name: hashf.hexdigest()  # type: ignore
        for hash_name, hashf in zip(hash_algorithm_names, hashes)
    }
    return (size, digests)
6846e39838f2017a46472826ba07bc9974e80c5a
3,638,212
def ucs(st: Pixel, end: Pixel, data: np.ndarray):
    """
    Uniform-cost (Dijkstra-style) search over a pixel grid.

    Expands pixels in order of accumulated 3-D Euclidean path cost
    (x, y, elevation) until `end` is reached or the frontier empties.

    :param st: start pixel.
    :param end: end pixel.
    :param data: image array, used only for visualisation via showImage.
    :return: list of Pixels from start to end, or ([], -1) if no path exists.
    """
    q = PriorityQueue()
    # Start node has zero accumulated distance and zero priority.
    startPriorityPixel = PixelPriority(st, 0, 0)
    q.put((0, startPriorityPixel))
    lowest = startPriorityPixel
    visited = dict()
    while lowest.pxl != end:
        if q.empty():
            # Frontier exhausted: no way to get to end.
            return [], -1
        thisDistace = lowest.distance
        for u in lowest.pxl.getNeighbors():
            if u is not None and (u.x, u.y) not in visited:
                # Visualise the expansion as it happens.
                showImage(data, u.y, u.x)
                # NOTE(review): pixels are marked visited on *discovery*,
                # not when settled -- a shorter path found later cannot
                # replace the first one enqueued, so the result may be
                # suboptimal compared to textbook Dijkstra.  Confirm this
                # is acceptable for the use case.
                visited[(u.x, u.y)] = 1
                # Step cost: 3-D Euclidean distance including elevation.
                dist = sqrt(pow(u.x - lowest.pxl.x, 2) + pow(u.y - lowest.pxl.y, 2) + \
                            pow(u.elevation - lowest.pxl.elevation, 2))
                newDistance = thisDistace + dist
                priority = newDistance
                priorityPixel = PixelPriority(u, newDistance, priority)
                # Remember the predecessor so the path can be rebuilt.
                priorityPixel.predecessor = lowest
                q.put((priority, priorityPixel))
        # Pop the cheapest frontier entry; tuple is (priority, pixel).
        lowest = q.get()[1]
    path = []
    # distance == 0 means start == end (or the end node was never reached
    # through any edge); otherwise walk predecessors back to the start.
    if lowest.distance != 0:
        lst = lowest
        while lst is not None:
            path.insert(0, lst.pxl)
            lst = lst.predecessor
    return path
743dbe230073bde4ba7b95e4520f097d8f7a4443
3,638,213
def tvdb_refresh_token(token: str) -> str:
    """
    Refreshes JWT token.

    Online docs: api.thetvdb.com/swagger#!/Authentication/get_refresh_token.
    """
    status, content = request_json(
        "https://api.thetvdb.com/refresh_token",
        headers={"Authorization": f"Bearer {token}"},
        cache=False,
    )
    # 401 means the supplied token itself was rejected.
    if status == 401:
        raise MnamerException("invalid token")
    # Anything else without a token payload is treated as a service outage.
    if status != 200 or not content.get("token"):  # pragma: no cover
        raise MnamerNetworkException("TVDb down or unavailable?")
    return content["token"]
a1974f43ed0e314100c686545bb610be9cc910ed
3,638,214
def get_data():
    """
    Download the Dutch COVID data sources and merge them into one frame.

    Outer-joins hospital admissions, LCPS, mobility/R, per-municipality
    counts, the reproduction number and performed tests on their date
    columns, then splits the result into weekday/weekend subsets.

    Returns:
        Tuple (merged DataFrame, weekday frame, weekend frame) as produced
        by last_manipulations.
    """
    df_hospital = download_hospital_admissions()
    #sliding_r_df = walkingR(df_hospital, "Hospital_admission")
    df_lcps = download_lcps()
    df_mob_r = download_mob_r()
    df_gemeente_per_dag = download_gemeente_per_dag()
    df_reprogetal = download_reproductiegetal()
    df_uitgevoerde_testen = download_uitgevoerde_testen()
    # Outer join keeps dates that appear in only one of the sources.
    type_of_join = "outer"
    df = pd.merge(df_mob_r, df_hospital, how=type_of_join, left_on = 'date', right_on="Date_of_statistics")
    #df = df_hospital
    # After each merge, backfill 'date' from the right-hand key column for
    # rows that only existed on the right side of the join.
    df.loc[df['date'].isnull(),'date'] = df['Date_of_statistics']
    df = pd.merge(df, df_lcps, how=type_of_join, left_on = 'date', right_on="Datum")
    df.loc[df['date'].isnull(),'date'] = df['Datum']
    #df = pd.merge(df, sliding_r_df, how=type_of_join, left_on = 'date', right_on="date_sR", left_index=True )
    df = pd.merge(df, df_gemeente_per_dag, how=type_of_join, left_on = 'date', right_on="Date_of_publication", left_index=True )
    df = pd.merge(df, df_reprogetal, how=type_of_join, left_on = 'date', right_on="Date", left_index=True )
    df = pd.merge(df, df_uitgevoerde_testen, how=type_of_join, left_on = 'date', right_on="Date_of_statistics", left_index=True )
    df = df.sort_values(by=['date'])
    df = splitupweekweekend(df)
    df, werkdagen, weekend_ = last_manipulations(df, None, None)
    # NOTE(review): set_index returns a new frame and the result is
    # discarded here, so this line has no effect -- confirm whether
    # `df = df.set_index('date')` was intended.
    df.set_index('date')
    return df, werkdagen, weekend_
2a9b909dc53b710ce9f1729e336464857a27bb30
3,638,215
def load_bin_file(bin_file, dtype="float32"):
    """Read a raw binary file into a 1-D NumPy array.

    :param bin_file: path of the binary file to read.
    :param dtype: NumPy dtype (or dtype string) used to interpret the bytes.
    :return: 1-D ndarray with the file's contents.
    """
    loaded = np.fromfile(bin_file, dtype=dtype)
    return loaded
facdabb726efd66ce6e7e462aed9458d8f3dc947
3,638,216
def nearest_value(array, value):
    """
    Search an array for the value closest to a given target.

    Arguments:
        array {NumPy Array} -- A NumPy array of numbers.
        value {float/int} -- The target value.

    Returns:
        float/int -- The element of `array` closest to the target (the
        first one, on ties).
    """
    distances = np.abs(array - value)
    return array[distances.argmin()]
e9bf37b02bd55a0bdd9bf6f001aca6bc69895d8c
3,638,217