code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def augmentOrDiminish(self, amountToScale, inPlace=True):
    """Scale this duration's quarterLength by ``amountToScale``.

    By default the change is made in place and ``None`` is returned;
    with ``inPlace=False`` this object is left untouched and a new,
    scaled ``DurationUnit`` is returned instead.

    Raises ``DurationException`` when ``amountToScale`` is not > 0.
    """
    if not amountToScale > 0:
        raise DurationException('amountToScale must be greater than zero')
    # Work either on self (in place) or on a fresh DurationUnit.
    target = self if inPlace else DurationUnit()
    target.quarterLength = self.quarterLength * amountToScale
    return None if inPlace else target
Given a number greater than zero, multiplies the current quarterLength of the duration by the number and resets the components for the duration (by default). Or if inPlace is set to False, returns a new duration that has the new length. Note that the default for inPlace is the opposite of what it is for augmentOrDiminish on a Stream. This is done purposely to reflect the most common usage. >>> bDur = duration.DurationUnit('16th') >>> bDur <music21.duration.DurationUnit 0.25> >>> bDur.augmentOrDiminish(2) >>> bDur.quarterLength 0.5 >>> bDur.type 'eighth' >>> bDur <music21.duration.DurationUnit 0.5> >>> bDur.augmentOrDiminish(4) >>> bDur.type 'half' >>> bDur <music21.duration.DurationUnit 2.0> >>> bDur.augmentOrDiminish(.125) >>> bDur.type '16th' >>> bDur <music21.duration.DurationUnit 0.25> >>> cDur = bDur.augmentOrDiminish(16, inPlace=False) >>> cDur, bDur (<music21.duration.DurationUnit 4.0>, <music21.duration.DurationUnit 0.25>)
625941c0d10714528d5ffc4d
def __add_sldIdLst(self):
    """Add a <p:sldIdLst> child to the <p:presentation> element.

    The slide-id list must be inserted before <p:sldSz> (or, failing
    that, before <p:notesSz>) to keep the schema-mandated child order.
    Must only be called when no <p:sldIdLst> exists yet.
    """
    sldIdLst = _child(self._element, 'p:sldIdLst', _nsmap)
    # Guard: caller is responsible for checking the element is absent.
    assert sldIdLst is None, '__add_sldIdLst() called where ' '<p:sldIdLst> already exists'
    sldIdLst = _Element('p:sldIdLst', _nsmap)
    # Insert immediately before <p:sldSz> when present ...
    sldSz = _child(self._element, 'p:sldSz', _nsmap)
    if sldSz is not None:
        sldSz.addprevious(sldIdLst)
    else:
        # ... otherwise before <p:notesSz>, which the schema requires.
        notesSz = _child(self._element, 'p:notesSz', _nsmap)
        notesSz.addprevious(sldIdLst)
    return sldIdLst
Add a <p:sldIdLst> element to <p:presentation> in the right sequence among its siblings.
625941c0b57a9660fec337ed
def two_length_run(l):
    """Return True if ``l`` contains a run of at least length two
    (two equal adjacent elements), otherwise False."""
    return any(a == b for a, b in zip(l, l[1:]))
(List)->boolean Returns true if l contains a run of at least length two Otherwise, returns false
625941c0e5267d203edcdc0b
def __call__(self, clip):
    """Rotate every frame of ``clip`` by one random angle.

    Args:
        clip (list): frames to rotate, either numpy.ndarray in
            (h, w, c) layout or PIL.Image.Image instances.

    Returns:
        list: rotated frames, same element type as the input.

    Raises:
        TypeError: if the first frame is neither ndarray nor PIL image.
    """
    # A single angle for the whole clip keeps frames mutually consistent.
    angle = random.uniform(self.degrees[0], self.degrees[1])
    if isinstance(clip[0], np.ndarray):
        rotated = [rotate(image=img, angle=angle, preserve_range=True) for img in clip]
    elif isinstance(clip[0], PIL.Image.Image):
        rotated = [img.rotate(angle) for img in clip]
    else:
        # Fixed: the old concatenation produced "...PIL.Imagebut got..."
        # (missing space between the two string fragments).
        raise TypeError('Expected numpy.ndarray or PIL.Image '
                        'but got list of {0}'.format(type(clip[0])))
    return rotated
Args: img (PIL.Image or numpy.ndarray): List of images to be cropped in format (h, w, c) in numpy.ndarray Returns: PIL.Image or numpy.ndarray: Cropped list of images
625941c0bde94217f3682d5f
def medoid(self):
    """Return the medoid of this cluster: the point whose summed
    distance to every other point in the cluster is smallest.

    Returns
    --------------------
        medoid -- Point, medoid of this cluster
    """
    # Total distance from each candidate point to all cluster points
    # (including itself; self-distance contributes equally everywhere).
    totals = [sum(p.distance(q) for q in self.points) for p in self.points]
    return self.points[totals.index(min(totals))]
Compute medoid of this cluster, that is, the point in this cluster that is closest to all other points in this cluster. Returns -------------------- medoid -- Point, medoid of this cluster
625941c0a8370b771705280c
def test_pickler(self):
    """Test the pickler: Pickler.create on dot data should return None."""
    dot = Dot()
    data = dot.create_dot(self.builder().merge(self.setup()))
    pickler = Pickler()
    # Fixed: assertEquals is a deprecated alias (removed in Python 3.12);
    # use the canonical assertEqual.
    self.assertEqual(pickler.create(data), None)
Test the pickler (Not working)
625941c0283ffb24f3c5586f
def list_tags_for_resource(
    name, region=None, key=None, keyid=None, profile=None, **args
):
    """List tags on an Elasticache resource.

    ``name`` (or an explicit ``ResourceName`` in ``args``) must be the
    full AWS ARN of the resource.

    Returns the list of tag dicts, or an empty list when the resource
    has no tags or the API call fails.
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if "ResourceName" in args:
        log.info(
            "'name: %s' param being overridden by explicitly provided "
            "'ResourceName: %s'",
            name,
            args["ResourceName"],
        )
        name = args["ResourceName"]
    else:
        args["ResourceName"] = name
    # Drop private keyword arguments salt may have injected.
    args = {k: v for k, v in args.items() if not k.startswith("_")}
    try:
        r = conn.list_tags_for_resource(**args)
        # Fixed: the response key is "TagList"; the old check for
        # "Taglist" never matched, so tags were always reported empty.
        if r and "TagList" in r:
            return r["TagList"]
        return []
    except botocore.exceptions.ClientError as e:
        log.error("Failed to list tags for resource %s: %s", name, e)
        return []
List tags on an Elasticache resource. Note that this function is essentially useless as it requires a full AWS ARN for the resource being operated on, but there is no provided API or programmatic way to find the ARN for a given object from its name or ID alone. It requires specific knowledge about the account number, AWS partition, and other magic details to generate. If you happen to have those handy, feel free to utilize this however... Example: .. code-block:: bash salt myminion boto3_elasticache.list_tags_for_resource name='arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot'
625941c066656f66f7cbc116
def pre_order(tree, output=None):
    """Pre-order traversal (value -> left -> right) of a dict-based tree.

    Args:
        tree: node dict with 'value', 'lchild' and 'rchild' keys, or
            None/falsy for an empty (sub)tree.
        output: optional list to append visited values to.

    Returns:
        The list of visited values, or None when ``tree`` is falsy.

    Fixed: the old mutable default argument (``output=[]``) was shared
    across calls, so results leaked from one invocation into the next;
    the recursion also silently relied on that shared default instead
    of passing ``output`` down.
    """
    if not tree:
        return
    if output is None:
        output = []
    output.append(tree['value'])
    pre_order(tree['lchild'], output)
    pre_order(tree['rchild'], output)
    return output
value -> left -> right
625941c05166f23b2e1a50c5
def getPermutation(self, n, k):
    """Return the k-th (1-indexed) permutation of the digits 1..n.

    Uses the factorial number system: the leading digit is fixed for
    blocks of (n-1)! permutations, the next for (n-2)!, and so on.

    :type n: int
    :type k: int
    :rtype: str

    Fixed: removed the leftover debug ``print(ind)`` and the fragile
    -1/0 index bookkeeping; factorials are precomputed once instead of
    being recomputed recursively inside the loop.
    """
    # Precompute 0! .. (n-1)! iteratively.
    facts = [1] * n
    for i in range(1, n):
        facts[i] = facts[i - 1] * i
    digits = list(range(1, n + 1))
    k -= 1  # switch to 0-indexed rank
    result = []
    for i in range(n - 1, -1, -1):
        # idx selects the next digit; k becomes the rank within the block.
        idx, k = divmod(k, facts[i])
        result.append(str(digits.pop(idx)))
    return ''.join(result)
:type n: int :type k: int :rtype: str
625941c0be383301e01b53f5
def __get_system_function (self):
    """Return a compiled, wrapped function for the system dynamics.

    Sympy's ``autowrap`` generates C code (cython backend) for
    ``self.sys_eq``, compiles it, and the result is wrapped via
    ``odeint_sys_wrapper`` for ODE integration.  The wrapped function
    is cached on ``self.sys_function``.
    """
    # Return the cached function when it was built on a previous call.
    if self.sys_function != None:
        return self.sys_function
    self.__define_sys_eq ()
    sys_vars = self.sys_vars
    sys_params = self.sys_params
    # autowrap writes the generated C sources into a per-system temp dir.
    sys_fun = autowrap (self.sys_eq, backend='cython', tempdir='autowrap_sys_' + self.name + '_tmp', args=[sys_vars, sys_params])
    wrapped_fun = self.odeint_sys_wrapper (sys_fun)
    self.sys_function = wrapped_fun
    return wrapped_fun
Creates a function that describes the dynamics of the system. This method uses Sympy to automatically create C code that represents the system; this C code is also automatically compiled and wrapped as a python function.
625941c0f7d966606f6a9f6e
def generate_filter(mask):
    """Generate a data filter from a masking function or boolean mask.

    mask: masking function (built with generate_mask) or a boolean mask

    Returns a function that filters (x, y) arrays or a monitor, keeping
    only the entries where the mask is True:
    x', y' = filter(x, y)
    """
    def func(x, z=None):
        # A callable mask is evaluated on the data; a static mask is used as-is.
        if not hasattr(mask, '__call__'):
            _mask = mask
        else:
            _mask = mask(x, z)
        # Monitor input: filter a shallow copy; raw arrays: wrap in a Monitor.
        if z is None and hasattr(x, '_x') and hasattr(x, '_y'):
            from copy import copy
            m = copy(x)
            mon = True
        else:
            from mystic.monitors import Monitor
            m = Monitor()
            m._x, m._y = x, z
            mon = False
        # Remember whether inputs were ndarrays so output types match inputs.
        ax = True if hasattr(m._x, 'tolist') else False
        ay = True if hasattr(m._y, 'tolist') else False
        from numpy import asarray
        m._x = asarray(m._x)[_mask]
        m._y = asarray(m._y)[_mask]
        if not ax: m._x = m._x.tolist()
        if not ay: m._y = m._y.tolist()
        if mon:
            try:
                ai = True if hasattr(m._id, 'tolist') else False
                # Fixed: `array` was undefined here (NameError the first
                # time a monitor with ids was filtered); use asarray.
                m._id = asarray(m._id)[_mask]
                if not ai: m._id = m._id.tolist()
            except IndexError:
                # ids may be shorter than the mask; leave them untouched.
                pass
            return m
        return m._x, m._y
    return func
generate a data filter from a data masking function mask: masking function (built with generate_mask) or a boolean mask returns a function that filters (x,y) or a monitor, based on the given mask x',y' = filter(x,y), where filter removes values where mask is False For example: >>> mon = Monitor() >>> mon([0.0,0.5,1.0],2) >>> mon([2.0,3.0,4.0],3) >>> mon([4.0,5.0,6.0],4) >>> mon([5.0,5.5,6.5],6) >>> >>> @impose_bounds((0,5)) ... def inputs(x): ... return x ... >>> m = generate_filter(generate_mask(inputs))(mon) >>> m._x [[0.0, 0.5, 1.0], [2.0, 3.0, 4.0]] >>> m._y [2, 3] >>> >>> @integers() ... def identity(x): ... return x ... >>> m = generate_filter(generate_mask(identity))(mon) >>> m._x [[2.0, 3.0, 4.0], [4.0, 5.0, 6.0]] >>> m._y [3, 4]
625941c08c3a873295158324
def test_field_lengths(self):
    """Verify the slug, name and location column lengths match the
    class constants they are configured from."""
    self.assertEqual(Climb.slug.property.columns[0].type.length, Climb.SLUG_STR_MAX)
    self.assertEqual(Climb.name.property.columns[0].type.length, Climb.NAME_STR_MAX)
    self.assertEqual(Climb.location.property.columns[0].type.length, Climb.LOCATION_STR_MAX)
Tests slug, name and location string lengths are set.
625941c026238365f5f0edd7
def test_create_token_for_user(self):
    """A POST with valid credentials returns HTTP 200 and a token."""
    payload = {'email': 'test@test.com', 'password': 'test_password'}
    create_user(**payload)
    res = self.client.post(TOKEN_URL, payload)
    self.assertIn('token', res.data)
    self.assertEqual(res.status_code, status.HTTP_200_OK)
Test that a token is created for the user
625941c099fddb7c1c9de2fe
def testVehicleStatsDefLevelMilliPercent(self):
    """Smoke-test VehicleStatsDefLevelMilliPercent construction with
    and without optional fields (no assertions; an exception fails it)."""
    inst_req_only = self.make_instance(include_optional=False)
    inst_req_and_optional = self.make_instance(include_optional=True)
Test VehicleStatsDefLevelMilliPercent
625941c071ff763f4b5495f4
def get_gaussian_mean_and_variance(self):
    r"""Return the mean and variance of the distribution.

    Only defined when the degrees of freedom nu = 2*a exceed 2
    (the mean alone requires nu > 1).

    Raises:
        ValueError: if degrees of freedom <= 1 (mean undefined) or
            <= 2 (variance undefined).
    """
    # Fixed: a bare `r` expression (the orphaned prefix of what should
    # have been an r-docstring) raised NameError at the top of this
    # method; it is now part of the raw docstring above.
    a = self.phi[3]
    nu = 2*a
    if nu <= 1:
        raise ValueError("Mean not defined for degrees of freedom <= 1")
    if nu <= 2:
        raise ValueError("Variance not defined if degrees of freedom <= 2")
    tau = self.u[2]
    tau_mu = self.u[0]
    mu = tau_mu / misc.add_trailing_axes(tau, 1)
    var = misc.get_diag(self.u[1], ndim=1) - tau_mu*mu
    var = var / misc.add_trailing_axes(tau, 1)
    # Student-t correction: nu/(nu-2) scales the Gaussian variance.
    var = nu / (nu-2) * var
    return (mu, var)
Return the mean and variance of the distribution
625941c03317a56b86939bca
def getUnionEncoding(self, text):
    """Encode each token of ``text``, take the union of the bitmaps,
    then sparsify.

    @param text   (str)   A non-tokenized sample of text.
    @return       (dict)  Encoding dict; the bitmap is at
                          encoding["fingerprint"]["positions"].
    """
    tokens = TextPreprocess().tokenize(text)
    # Count how often each bit position occurs across token fingerprints.
    counts = Counter()
    for t in tokens:
        bitmap = self.client.getBitmap(t)["fingerprint"]["positions"]
        counts.update(bitmap)
    # Keep only the most frequent positions to restore target sparsity.
    positions = self.sparseUnion(counts)
    encoding = {
        "text": text,
        # Percentage of the n total bits that are set.
        "sparsity": len(positions) * 100 / float(self.n),
        "df": 0.0,
        "height": self.h,
        "width": self.w,
        "score": 0.0,
        "fingerprint": {"positions": sorted(positions)},
        "pos_types": []
    }
    return encoding
Encode each token of the input text, take the union, and then sparsify. @param text (str) A non-tokenized sample of text. @return (dict) The bitmap encoding is at encoding["fingerprint"]["positions"].
625941c07c178a314d6ef3c8
def is_valid(self, serializer):
    """Validate ``serializer`` and raise a Fail response on error.

    :param serializer: a serializer instance exposing is_valid()/errors
    :raises self.response.Fail: with one error message per line (first
        error of each field) when validation fails
    :return: None when validation succeeds
    """
    if not serializer.is_valid():
        errors = serializer.errors
        # Join the first error of each field, one per line.
        message = ''
        for field in errors:
            message += str(errors[field][0])+'\n'
        # Fixed: removed leftover debug print(errors) that leaked
        # validation data to stdout on every failure.
        raise self.response.Fail(message=message)
数据是否验证通过 :param serializer: :return:
625941c0e8904600ed9f1e97
def build_maskiou_head(cfg):
    """Build the mask-IoU head class named by
    ``cfg.MODEL.ROI_MASKIOU_HEAD.NAME`` and instantiate it with cfg."""
    head_cls = ROI_MASKIOU_HEAD_REGISTRY.get(cfg.MODEL.ROI_MASKIOU_HEAD.NAME)
    return head_cls(cfg)
Build a mask iou head defined by `cfg.MODEL.ROI_MASKIOU_HEAD.NAME`.
625941c0a934411ee37515ff
def check_email_exists(self, email):
    """Return the matching user rows when ``email`` is already
    registered; returns None implicitly when no user matches.

    Used to prevent a user from registering twice.
    """
    # Fixed: parameterized query. The old string-formatted SQL
    # ("... email = '{}'".format(email)) was injectable.
    query = "SELECT * from users where email = %s;"
    db.cursor.execute(query, (email,))
    user_details = db.cursor.fetchall()
    if user_details:
        return user_details
This method checks the users table for an existing email to prevent a user from registering twice
625941c0d58c6744b4257bcc
def __init__(
        self, layout, imagePath=None, offset=(0, 0),
        bgcolor=COLORS.Off, brightness=255, cycles=1, seconds=None,
        random=False, use_file_fps=True, use_gamma=True,
        scale_to=None, **kwds):
    """Animation for GIF files: a single animated GIF or a folder of GIFs.

    layout: layout.Matrix instance
    imagePath: path to a GIF file or a folder of GIF files
    offset: (x, y) of the top-left corner of the image
    bgcolor: RGB tuple used to replace transparent pixels
    brightness: 0-255 scaling applied to the image
    cycles: number of times to play each animation
    seconds: optional playback duration limit
    random: play folder GIFs in random order
    use_file_fps: honor the frame timing stored in the GIF
    use_gamma: apply the driver's gamma to raw image data
    scale_to: None, 'x', 'y', 'xy' or 'fit' scaling mode
    """
    super().__init__(layout, **kwds)
    self.cycles = cycles
    self.cycle_count = 0
    self.seconds = seconds
    self.last_start = 0
    self.random = random
    self.use_file_fps = use_file_fps
    self._bright = brightness
    # Pre-scale the background color by the requested brightness.
    self._bgcolor = color_scale(bgcolor, self._bright)
    self._offset = offset
    # Double-buffered frame storage; swapbuf() flips between the two.
    self._image_buffers = [None, None]
    self._cur_img_buf = 1
    self.imagePath = imagePath or str(DEFAULT_ANIM)
    # Folder mode cycles through every *.gif in the directory.
    self.folder_mode = os.path.isdir(self.imagePath)
    self.gif_files = []
    self.gif_indices = []
    self.folder_index = -1
    self.load_thread = None
    self.use_gamma = use_gamma
    # Resolve the scale mode name to its implementation (None disables).
    self.scale_to = scale_to and SCALE_TO[scale_to]
    if self.folder_mode:
        self.gif_files = glob.glob(self.imagePath + "/*.gif")
        self.gif_indices = list(range(len(self.gif_files)))
        self.loadNextGIF()
        self.swapbuf()
        self.load_thread = None
    else:
        self.loadGIFFile(self.imagePath)
        self.swapbuf()
Animation class for displaying image animations for GIF files or a set of bitmaps layout: layout.Matrix instance imagePath: Path to either a single animated GIF image or folder of GIF files offset: X, Y coordinates of the top-left corner of the image bgcolor: RGB tuple color to replace any transparent pixels with. Avoids transparent showing as black brightness: Brightness value (0-255) to scale the image by. Otherwise uses master brightness at the time of creation use_gamma: If true, use the driver's gamma on the raw image data. TODO: why do we do this? scale_to: Which dimensions to scale the image to? None: Don't scale 'x': Scale to use full width 'y': Scale to use full height 'xy': Scale both width and height 'fit: Use best fit from 'x' or 'y'
625941c03617ad0b5ed67e65
def _filter_and_smooth(self, f, included, data):
    """Run Kalman filtering and RTS smoothing over the included data.

    f: pykalman KalmanFilter object, or None to skip estimation
    included: boolean array selecting data points with non-zero
        assignment probability
    data: observations to estimate states from

    Returns (state_means, state_covariances, pairwise_covariances)
    from the smoothing pass, or (None, None, None) when f is None.
    """
    if f is not None:
        Z = f._parse_observations(data[included].T)
        (transition_matrices, transition_offsets, transition_covariance,
         observation_matrices, observation_offsets, observation_covariance,
         initial_state_mean, initial_state_covariance) = (
            f._initialize_parameters()
        )
        # Forward pass: filtered estimates given observations up to t.
        (predicted_state_means, predicted_state_covariances, _,
         filtered_state_means, filtered_state_covariances) = (
            filtermethods._filter(
                transition_matrices, observation_matrices,
                transition_covariance, observation_covariance,
                transition_offsets, observation_offsets,
                initial_state_mean, initial_state_covariance, Z
            )
        )
        # Backward pass: smoothed estimates given *all* observations.
        (smoothed_state_means, smoothed_state_covariances,
         kalman_smoothing_gains) = (
            filtermethods._smooth(
                transition_matrices, filtered_state_means,
                filtered_state_covariances, predicted_state_means,
                predicted_state_covariances
            )
        )
        # Cross-time covariances, typically needed for EM-style updates.
        pairwise_covariances = filtermethods._smooth_pair(
            smoothed_state_covariances, kalman_smoothing_gains
        )
        state_means = smoothed_state_means
        state_covariances = smoothed_state_covariances
        pairwise_covariances = pairwise_covariances
    else:
        state_means = None
        state_covariances = None
        pairwise_covariances = None
    return state_means, state_covariances, pairwise_covariances
f: kalman filter object included: boolean array indicating which data points have non-zero assignment probability data: data to estimate states on kalman filtering step, estimates distribution over state sequence given all of the data. relies on kalman filter package pykalman
625941c0d7e4931a7ee9de89
def create_index(self, columns, name=None):
    """Create an index to speed up queries on a table.

    If no ``name`` is given, a deterministic name is derived from the
    column list.  Returns the (possibly cached) Index object, or None
    when creation failed. ::

        table.create_index(['name', 'country'])
    """
    self._check_dropped()
    with self.database.lock:
        if not name:
            # Derive a stable name from the column list.
            sig = abs(hash('||'.join(columns)))
            name = 'ix_%s_%s' % (self.table.name, sig)
        if name in self.indexes:
            return self.indexes[name]
        try:
            columns = [self.table.c[c] for c in columns]
            idx = Index(name, *columns)
            idx.create(self.database.engine)
        except Exception:
            # Best-effort: cache the failure as None, but never swallow
            # KeyboardInterrupt/SystemExit as the old bare `except:` did.
            idx = None
        self.indexes[name] = idx
        return idx
Create an index to speed up queries on a table. If no ``name`` is given a random name is created. :: table.create_index(['name', 'country'])
625941c04f88993c3716bfd6
def _fit_precise(points: np.ndarray) -> Callable[[np.ndarray], np.ndarray]:
    """Build an interpolating function from ``points`` (unclosed curve).

    Quadratic interpolation when more than two points are given,
    linear otherwise; outside the x-range the first/last y-values are
    held constant (fill with y_first, y_last).
    """
    pairs = _prep_pairs(points)
    kind = 'quadratic' if pairs.shape[0] > 2 else 'linear'
    fill = (pairs[0, 1], pairs[-1, 1])
    return interp1d(pairs[:, 0], pairs[:, 1], kind=kind,
                    bounds_error=False, fill_value=fill)
Create functions from interpolating points, unclosed, fill with y_first, y_last
625941c0187af65679ca508a
def get_datatypes_in_project(self, project_id, only_visible=False):
    """Return all DataTypes launched in the given project, ordered by id.

    :param project_id: id of the project whose operations are joined
    :param only_visible: when True, restrict to visible DataTypes
    """
    query = self.session.query(model.DataType
                ).join((model.Operation, model.Operation.id == model.DataType.fk_from_operation)
                ).filter(model.Operation.fk_launched_in == project_id).order_by(model.DataType.id)
    if only_visible:
        # SQLAlchemy column comparison: `== True` is intentional here.
        query = query.filter(model.DataType.visible == True)
    return query.all()
Get all the DataTypes for a given project with no other filter apart from the projectId
625941c0507cdc57c6306c42
def remount(name, device, mkmnt=False, fstype='', opts='defaults'):
    """Attempt to remount a device; if the device is not already
    mounted, mount() is called instead.

    CLI Example::

        salt '*' mount.remount /mnt/foo /dev/sdz1 True
    """
    # Accept both a comma-joined string and a list of options.
    if isinstance(opts, string_types):
        opts = opts.split(',')
    mnts = active()
    if name in mnts:
        # Already mounted: issue `mount -o ...,remount`.
        if 'remount' not in opts:
            opts.append('remount')
        lopts = ','.join(opts)
        cmd = 'mount -o {0} {1} {2} '.format(lopts, device, name)
        if fstype:
            cmd += ' -t {0}'.format(fstype)
        out = __salt__['cmd.run_all'](cmd)
        if out['retcode']:
            # Non-zero exit: surface stderr to the caller.
            return out['stderr']
        return True
    return mount(name, device, mkmnt, fstype, opts)
Attempt to remount a device, if the device is not already mounted, mount is called CLI Example:: salt '*' mount.remount /mnt/foo /dev/sdz1 True
625941c0851cf427c661a47e
def insert(self, key, value):
    """Insert ``value`` under ``key`` unless the key already exists.

    Persists the change (save + reload) and logs the addition when a
    logfile is configured.  Existing keys are left untouched.
    """
    if self.have(key):
        return
    self[key] = value
    self.save()
    self.reload()
    if self.logfile:
        self.logg("Added key %s with value %s" % (key, value))
Insert specified value to specified key
625941c0a17c0f6771cbdfbf
def pop(self):
    """Remove and return the element at the top of the stack (LIFO).

    Raises:
        Empty: if the stack contains no elements.
    """
    if not self.is_empty():
        return self._data.pop()
    raise Empty('Stack is empty')
Remove and return the element from the top of the stack(i.e. LIFO) Raise Empty exception if the stack is empty.
625941c01f5feb6acb0c4ac0
def encode(lat, lon, precision=None):
    """Encode a lat-/longitude as a geohash.

    @param lat: Latitude (C{degrees}).
    @param lon: Longitude (C{degrees}).
    @keyword precision: Optional geohash length (C{int}, 1..12); when
        omitted, the shortest geohash that round-trips to within EPS
        of the input coordinates is returned.

    @return: The geohash (C{str}).

    @raise ValueError: Invalid I{lat}, I{lon} or I{precision}.
    """
    lat, lon = _2fll(lat, lon)
    if not precision:
        # Auto precision: grow until decode() reproduces the input.
        for prec in range(1, 13):
            gh = encode(lat, lon, prec)
            ll = map2(float, decode(gh))
            if abs(lat - ll[0]) < EPS and abs(lon - ll[1]) < EPS:
                return gh
        prec = 12  # fall back to maximum precision
    else:
        try:
            prec = int(precision)
            if not 0 < prec < 13:
                raise ValueError
        except ValueError:
            raise ValueError('%s invalid: %r' % ('precision', precision))
    latS, latN = -90, 90
    lonW, lonE = -180, 180
    b = i = 0
    e, gh = True, []
    # Alternate bisecting longitude (e True) and latitude (e False);
    # each decision contributes one bit, 5 bits per base-32 character.
    while len(gh) < prec:
        i += i
        if e:
            m = favg(lonE, lonW)
            if lon < m:
                lonE = m
            else:
                lonW = m
                i += 1
        else:
            m = favg(latN, latS)
            if lat < m:
                latN = m
            else:
                latS = m
                i += 1
        e = not e
        b += 1
        if b == 5:
            # Emit one base-32 character per 5 accumulated bits.
            gh.append(_GeohashBase32[i])
            b = i = 0
    return ''.join(gh)
Encode a lat-/longitude as a geohash, either to the specified or if not given, an automatically evaluated precision. @param lat: Latitude (C{degrees}). @param lon: Longitude (C{degrees}). @keyword precision: Optional, desired geohash length (C{int}). @return: The geohash (C{str}). @raise ValueError: Invalid I{lat}, I{lon} or I{precision}. @example: >>> geohash.encode(52.205, 0.119, 7) # 'u120fxw' >>> geohash.encode(52.205, 0.119, 12) # 'u120fxwshvkg' >>> geohash.encode(52.205, 0.1188, 12) # 'u120fxws0jre' >>> geohash.encode(52.205, 0.1188) # 'u120fxw' >>> geohash.encode( 0, 0) # 's00000000000'
625941c0ad47b63b2c509eec
def all_err(err_message):
    """Print every message from a 400 error response.

    err_message: list of error dicts with 'Message' and
    'RelatedProperties' keys (Redfish-style payload).  FQDN-related
    errors get a dedicated domain-error text; every other message is
    lower-cased at the first letter, trimmed of its trailing character,
    and has vendor path fragments rewritten before printing.
    """
    idx = 0
    while idx < len(err_message):
        if len(err_message[idx]['RelatedProperties']) != 0:
            if err_message[idx]['RelatedProperties'][0] == "#/FQDN":
                # FQDN problems use a fixed domain message; entries after
                # the first use the indented variant (DOMAINERR1).
                if idx == 0:
                    print(DOMAINERR)
                else:
                    print(DOMAINERR1)
                idx += 1
                continue
        check_info = err_message[idx]['Message']
        # Lower-case the first letter and drop the trailing character
        # (presumably the final period — TODO confirm payload format).
        message = "%s%s" % (check_info[0].lower(), check_info[1:len(check_info) - 1])
        message = message.replace("Oem/Huawei/", "")
        message = message.replace("NameServers/0", "PreferredServer")
        message = message.replace("NameServers/1", "AlternateServer")
        if idx == 0:
            print('%s' % message)
        else:
            # Indent follow-up messages for readability.
            print(' %s' % message)
        idx += 1
#==================================================================================== # @Method: 400 messages # @Param:idx, err_message # @Return: # @date:2017.08.29 10:50 #====================================================================================
625941c0a8370b771705280d
def electronicConvergeFinish(self,dir):
    """Return True only if the run reached electronic convergence AND
    VASP finished normally.

    Reads the NELM (max electronic steps) setting from dir/INCAR via
    grep, then checks convergence against that limit.
    """
    proc = subprocess.Popen(['grep','-i','NELM',dir+'/INCAR'],stdout=subprocess.PIPE)
    result = proc.communicate()[0]
    # INCAR line looks like "NELM = 60"; take the first token after '='.
    # NOTE(review): communicate() returns bytes on Python 3 — str.split
    # here assumes Python 2 (or text-mode pipes); confirm interpreter.
    NELM = int(result.split('=')[1].split()[0])
    return self.elConvergeCheck(dir,NELM) and finishCheck(dir)
Test requires electronic convergence AND vasp finishing
625941c0f9cc0f698b14056a
def create_test_space(input_corpus, test_perc, contexts, pos_dict, bigrams=True, trigrams=True):
    """Build the word-by-context co-occurrence matrix for the test set.

    :param input_corpus: .json corpus file (same format as training)
    :param test_perc: percentage of the corpus (taken from the end)
        allocated to the test set
    :param contexts: dict mapping training contexts to column indices
    :param pos_dict: dict mapping CHILDES PoS tags to custom tags
    :param bigrams: whether bigram contexts are collected
    :param trigrams: whether trigram contexts are collected
    :return: (co_occurrences, word_ids, word_freqs) where
        co_occurrences is a 2d NumPy array (rows = words, columns =
        contexts), word_ids maps words to row indices, and word_freqs
        counts word frequencies in the test set.
    """
    co_occurrences = np.zeros([0, len(contexts)])
    word_ids = {}
    word_freqs = Counter()
    last_word = 0
    corpus = json.load(open(input_corpus, 'r+'))
    total_utterances = len(corpus[0])
    # The test set is the trailing test_perc% of the utterances.
    cutoff = total_utterances - np.floor(total_utterances / 100 * test_perc)
    # Progress checkpoints at every 5% of the test portion.
    check_points = {np.floor((total_utterances - cutoff) / 100 * n) + cutoff: n for n in np.linspace(5, 100, 20)}
    size = 2 if trigrams else 1
    nline = int(cutoff)
    print("Considering utterances for test set from utterance number %d" % cutoff)
    while nline < total_utterances:
        tokens = corpus[0][nline]
        lemmas = corpus[1][nline]
        words = utterance.clean_utterance(tokens, lemmas=lemmas, pos_dict=pos_dict)
        if len(words) > 1:
            words.append('#end~bound')
            last_idx = len(words) - 1
            idx = 1
            while idx < last_idx:
                context_window = utterance.construct_window(words, idx, size)
                current_contexts = utterance.get_ngrams(context_window, bigrams=bigrams, trigrams=trigrams)
                target_word = words[idx]
                word_freqs[target_word] += 1
                if target_word not in word_ids:
                    # New word: assign the next row index and grow the matrix.
                    word_ids[target_word] = last_word
                    last_word += 1
                    new_row = np.zeros([1, co_occurrences.shape[1]])
                    co_occurrences = np.vstack([co_occurrences, new_row])
                for context in current_contexts:
                    # Only contexts seen during training are counted.
                    if context in contexts:
                        row_idx = word_ids[target_word]
                        col_idx = contexts[context]
                        co_occurrences[row_idx, col_idx] += 1
                idx += 1
        if nline in check_points:
            # Fixed: the progress string contained a mangled control
            # character between "of" and "the".
            print(strftime("%Y-%m-%d %H:%M:%S") + ": %d%% of the utterances allocated as test set has been processed." % check_points[nline])
        nline += 1
    return co_occurrences, word_ids, word_freqs
:param input_corpus: the same corpus used for training, in the same .json format (see the documentation to the function collect_contexts for further details :param test_perc: a number indicating the percentage of the input corpus to be used as test set - ideally this would be 100 - the training percentage, but different values can be chosen, However, we stress that it is preferable to avoid any overlap between training and test material :param contexts: a dictionary containing all the contexts collected during training, mapped to the column index each context has in the training space :param pos_dict: a dictionary mapping CHILDES Parts-of-Speech tags to custom tags (the same that was used as input to the function collect_contexts :param bigrams: a boolean indicating whether bigrams are to be collected :param trigrams: a boolean indicating whether trigrams are to be collected :return co_occurrences: a NumPY 2d array, where rows are words, columsn are distributional contexts, and cells contain integers indicating how many times a word co-occurred with a context in the input corpus :return word_ids a dictionary mapping words to numerical indices, indicating the corresponding row in the co-occurrence matrix :return word_freqs: a dictionary mapping words to their frequency count as computed from the test set
625941c0ad47b63b2c509eed
def run_dimension_etl(pygram_dimension_factory, source_sql, source_conn, output_conn, create_sql="", create_if_needed=True, fail_if_table_exists=True):
    """Run the ETL for one pygrametl dimension table.

    Usable from any kind of workflow (for example a celery task) or a
    simple main program.

    :param pygram_dimension_factory: dict with keys 'name', 'class',
        'key', 'attributes', 'lookupatts' describing the dimension
    :param source_sql: SQL for the source rows, or the literal 'CSV'
        to read csv_source/<name>.csv instead
    :param source_conn: source database connection (unused for CSV)
    :param output_conn: warehouse database connection
    :param create_sql: DDL used to create the table when it is missing
    :param create_if_needed: appears unused in this body — NOTE(review):
        confirm intended behavior
    :param fail_if_table_exists: raise when the target table exists
    :return: number of rows loaded
    """
    pygram_outputconn = pygrametl.ConnectionWrapper(connection=output_conn)
    dw_table_exists = True
    table_name = pygram_dimension_factory["name"]
    cursor = output_conn.cursor()
    logger.info("Processing dimension '%s'" % table_name)
    # Probe for the table with DESC; a "doesn't exist" error from MySQL
    # means we need to create it.
    try:
        cursor.execute("desc %s" % table_name)
        if fail_if_table_exists:
            raise dwexcept.DataWarehouseTableAlreadyExists("Table %s already exists. Use delete_existing_tables to clean the warehouse." % table_name)
        else:
            logger.warn("ETL for %s will be inserting data into an existing table", table_name)
    except _mysql_exceptions.ProgrammingError as e:
        if "DOESN'T EXIST" in str(e).upper():
            dw_table_exists = False
        else:
            raise e
    except Exception as e:
        raise e
    if not dw_table_exists:
        logger.info("Creating table %s.", table_name)
        result = cursor.execute(create_sql)
        logger.info("Table %s created.", table_name)
    logger.info("Executing source query for dimension %s", table_name)
    try:
        # Instantiate the pygrametl dimension from the factory dict.
        pygram_dim_class = pygram_dimension_factory["class"]
        pygram_dim_object = pygram_dim_class(
            name=pygram_dimension_factory["name"],
            key=pygram_dimension_factory["key"],
            attributes=pygram_dimension_factory["attributes"],
            lookupatts=pygram_dimension_factory["lookupatts"])
        if source_sql == 'CSV':
            # CSV mode: rows come from csv_source/<dimension name>.csv.
            try:
                filepath = os.path.join(
                    os.path.dirname(__file__), 'csv_source', '{0}.csv'.format(
                        pygram_dimension_factory["name"]))
                source_cursor = csv.DictReader(open(filepath, 'rb'))
            except IOError as e:
                raise e
        else:
            source_cursor = source_conn.cursor()
            source_cursor.execute(source_sql)
        logger.info("Loading data")
        start_time = datetime.utcnow()
        row_count = 0
        for row in source_cursor:
            row_count += 1
            pygram_dim_object.insert(row)
        end_time = datetime.utcnow()
        logger.info("Loaded %d rows into table %s (elapsed time: %s)", row_count, table_name, end_time - start_time)
        output_conn.commit()
        return row_count
    except Exception as e:
        raise e
This function can be used in any kind of workflow (for example in a celery task) or in a simple main program.
625941c02eb69b55b151c819
def sample_images(img, seg):
    """Randomly split paired image/segmentation arrays into training
    and testing sets.

    The split ratio comes from par.train_percentage; images keep their
    pairing with segmentations under the same random permutation.

    Returns (train_img, train_seg, test_img, test_seg) as float32
    arrays of shape (n, par.img_width, par.img_height).
    """
    n_files = len(img)
    n_train = int(par.train_percentage / 100 * n_files)
    n_test = n_files - n_train
    train_img = np.zeros(shape=(n_train, par.img_width, par.img_height), dtype=np.float32)
    train_seg = np.zeros(shape=(n_train, par.img_width, par.img_height), dtype=np.float32)
    test_img = np.zeros(shape=(n_test, par.img_width, par.img_height), dtype=np.float32)
    test_seg = np.zeros(shape=(n_test, par.img_width, par.img_height), dtype=np.float32)
    c_train = 0
    c_test = 0
    # Visit all indices in random order; the first n_train drawn go to
    # the training set, the remainder to the test set.
    for rand_idx in rand.sample(range(n_files), n_files):
        print((f"Sampling images... Now at " + f"{100*(c_train+c_test+1)/n_files:.2f}%"), end='\r', flush=True)
        if c_train < n_train:
            train_img[c_train] = img[rand_idx]
            train_seg[c_train] = seg[rand_idx]
            c_train += 1
        else:
            test_img[c_test] = img[rand_idx]
            test_seg[c_test] = seg[rand_idx]
            c_test += 1
    print("\nSampling images completed!\n")
    return train_img, train_seg, test_img, test_seg
Randomly divide the image and segmentation images into training and testing sets.
625941c0187af65679ca508b
def __current_character(self):
    """Return the character at the current offset, or None when the
    offset has reached the end of the text."""
    if self.offset >= self.length:
        return None
    return self.text[self.offset]
Retrieve the character at the current offset
625941c00fa83653e4656f29
def lift(self, left, right, right_precalculated = None): <NEW_LINE> <INDENT> if right_precalculated is not None: <NEW_LINE> <INDENT> sup_right = self.support(right, itemset_precalculated = right_precalculated) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> sup_right = self.support(right) <NEW_LINE> <DEDENT> lift = float(self.confidence(left, right)) / float(sup_right) <NEW_LINE> return lift
Calculates the confidence of the rule :param left: left handside of the rule (list) :param right: right handside of the rule (list) :param right_precalculated: Existing suppport count value :return: Confidence value (float)
625941c08e7ae83300e4af39
def startacquisition_cmd(args, device): <NEW_LINE> <INDENT> device.setconfig(args.voltage, args.frequency, args.impuls_nb, args.channels_nb, args.integration_nb) <NEW_LINE> device.acquiremeasures(args.output, args.delim, args.stdoutdisplay, args.datetimedisplay)
start acquisition
625941c091f36d47f21ac45d
def print_last_word(words): <NEW_LINE> <INDENT> words = words.pop(-1) <NEW_LINE> print(words)
Print the last word after popping it off
625941c0bd1bec0571d9059b
def test_forward_rnn_layer_x(self): <NEW_LINE> <INDENT> rnn = layers.TransposedSummarizingRNN(10, 2, 'f', False, False) <NEW_LINE> o = rnn(torch.randn(1, 10, 32, 64)) <NEW_LINE> self.assertEqual(o[0].shape, (1, 2, 32, 64))
Test unidirectional RNN layer in x-dimension.
625941c0ac7a0e7691ed403d
def test_SetProductTYpe_request(self): <NEW_LINE> <INDENT> response = self.mock_request(product_id=self.PRODUCT_ID, type=self.TYPE) <NEW_LINE> self.assertEqual(response, self.RESPONSE) <NEW_LINE> self.assertDataSent(self.request_class.PRODUCT_ID, self.PRODUCT_ID) <NEW_LINE> self.assertDataSent(self.request_class.TYPE, self.TYPE)
Test the SetProductType request.
625941c08a43f66fc4b53fd4
def display_digit(image, label=None, pred_label=None): <NEW_LINE> <INDENT> if image.shape == (784,): <NEW_LINE> <INDENT> image = image.reshape((28, 28)) <NEW_LINE> <DEDENT> label = np.argmax(label, axis=0) <NEW_LINE> if pred_label is None and label is not None: <NEW_LINE> <INDENT> plt.figure_title('Label: %d' % (label)) <NEW_LINE> <DEDENT> elif label is not None: <NEW_LINE> <INDENT> plt.figure_title('Label: %d, Pred: %d' % (label, pred_label)) <NEW_LINE> <DEDENT> plt.imshow(image, cmap=plt.get_cmap('gray_r')) <NEW_LINE> plt.show()
Display the Digit from the image. If the Label and PredLabel is given, display it too.
625941c0d99f1b3c44c67501
def HasCode(self): <NEW_LINE> <INDENT> return bool(self._fragmentCode or self._vertexCode)
Returns whether the program has any code associated with it. If not, you shoul probably not enable it.
625941c02ae34c7f2600d09e
def get(metaopt): <NEW_LINE> <INDENT> m = EC2Metadata() <NEW_LINE> return m.get(metaopt)
primitive: return value of metaopt
625941c0004d5f362079a2a2
def preprocess_data(): <NEW_LINE> <INDENT> data = pd.read_csv('./housing.csv', header=None, sep='\s+') <NEW_LINE> data.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'Price'] <NEW_LINE> x = np.array(data[['RM', 'PTRATIO', 'LSTAT']]) <NEW_LINE> y = np.array(data['Price']) <NEW_LINE> mean = np.mean(x, axis=0) <NEW_LINE> std = np.std(x, axis=0) <NEW_LINE> x_norm = (x - mean) / std <NEW_LINE> train_x, test_x, train_y, test_y = train_test_split(x_norm, y, test_size=0.02) <NEW_LINE> return train_x, test_x, train_y, test_y
1、进行特征选择 2、数据正规化处理 3、划分训练集和测试集 :return: train_x, test_x, train_y, test_y
625941c0a79ad161976cc0b2
def updateNote(self, authenticationToken, note): <NEW_LINE> <INDENT> pass
Submit a set of changes to a note to the service. The provided data must include the note's guid field for identification. The note's title must also be set. @param note A Note object containing the desired fields to be populated on the service. With the exception of the note's title and guid, fields that are not being changed do not need to be set. If the content is not being modified, note.content should be left unset. If the list of resources is not being modified, note.resources should be left unset. @return The metadata (no contents) for the Note on the server after the update @throws EDAMUserException <ul> <li> BAD_DATA_FORMAT "Note.title" - invalid length or pattern </li> <li> BAD_DATA_FORMAT "Note.content" - invalid length for ENML body </li> <li> BAD_DATA_FORMAT "NoteAttributes.*" - bad resource string </li> <li> BAD_DATA_FORMAT "ResourceAttributes.*" - bad resource string </li> <li> BAD_DATA_FORMAT "Resource.mime" - invalid resource MIME type </li> <li> DATA_CONFLICT "Note.deleted" - deleted time set on active note </li> <li> DATA_REQUIRED "Resource.data" - resource data body missing </li> <li> ENML_VALIDATION "*" - note content doesn't validate against DTD </li> <li> LIMIT_REACHED "Note.tagGuids" - too many Tags on Note </li> <li> LIMIT_REACHED "Note.resources" - too many resources on Note </li> <li> LIMIT_REACHED "Note.size" - total note size too large </li> <li> LIMIT_REACHED "Resource.data.size" - resource too large </li> <li> LIMIT_REACHED "NoteAttribute.*" - attribute string too long </li> <li> LIMIT_REACHED "ResourceAttribute.*" - attribute string too long </li> <li> PERMISSION_DENIED "Note" - user doesn't own </li> <li> PERMISSION_DENIED "Note.notebookGuid" - user doesn't own destination </li> <li> QUOTA_REACHED "Accounting.uploadLimit" - note exceeds upload quota </li> <li> BAD_DATA_FORMAT "Tag.name" - Note.tagNames was provided, and one of the specified tags had an invalid length or pattern </li> <li> LIMIT_REACHED "Tag" - Note.tagNames was 
provided, and the required new tags would exceed the maximum number per account </li> </ul> @throws EDAMNotFoundException <ul> <li> "Note.guid" - note not found, by GUID </li> <li> "Note.notebookGuid" - if notebookGuid provided, but not found </li> </ul> Parameters: - authenticationToken - note
625941c02ae34c7f2600d09f
def __init__(self, base, length, amount, encode, decode): <NEW_LINE> <INDENT> self.base = base <NEW_LINE> self.length = length <NEW_LINE> self.amount = amount <NEW_LINE> self.encode = encode <NEW_LINE> self.decode = decode <NEW_LINE> self.i = 0 <NEW_LINE> self.lowerBound = 0 <NEW_LINE> self.upperBound = pow(self.base, self.length)
Base to generate in, the length of the hashes, amount of hashes to generate, conversion function
625941c007d97122c41787f4
def chunk_dates(self, job: Dict, skeleton: Dict = None) -> List[Dict]: <NEW_LINE> <INDENT> data = [] <NEW_LINE> skeleton = deepcopy(skeleton) or {} <NEW_LINE> job = deepcopy(job) <NEW_LINE> period = job.pop('period', None) <NEW_LINE> isolate = job.pop('isolate_days', None) <NEW_LINE> period_patterns = ['last_[0-9]+_days', '[0-9]+_days_back', 'yesterday', 'today', 'previous_[0-9]+_days', 'last_week'] <NEW_LINE> custom_period_patterns = self.config.get('custom_period_patterns') <NEW_LINE> if custom_period_patterns: <NEW_LINE> <INDENT> if isinstance(custom_period_patterns, (list, tuple)): <NEW_LINE> <INDENT> for method in custom_period_patterns: <NEW_LINE> <INDENT> if isinstance(method, str): <NEW_LINE> <INDENT> period_patterns.append(method) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError(f"Pattern '{method}' expected to be str, got {type(method)}") <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError(f"'custom_period_patterns' expected to be (list, tuple), " f"got {type(custom_period_patterns)}") <NEW_LINE> <DEDENT> <DEDENT> if period: <NEW_LINE> <INDENT> date_list = [] <NEW_LINE> for pattern in period_patterns: <NEW_LINE> <INDENT> if re.match(pattern, period): <NEW_LINE> <INDENT> logger.debug(f"Found period '{period}' for job {job}") <NEW_LINE> method_name = pattern.replace('[0-9]+', 'x', 1) <NEW_LINE> try: <NEW_LINE> <INDENT> date_list = getattr(self, method_name)(period) <NEW_LINE> <DEDENT> except TypeError: <NEW_LINE> <INDENT> date_list = getattr(self, method_name)() <NEW_LINE> <DEDENT> break <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError(f"Unsupported period requested: {period}. Valid (basic) options are: " f"'last_X_days', 'X_days_back', 'yesterday', 'today', 'previous_[0-9]+_days', " f"'last_week'") <NEW_LINE> <DEDENT> if isolate: <NEW_LINE> <INDENT> assert len(date_list) > 0, f"The chunking period: {period} did not generate date_list. Bad." 
<NEW_LINE> for d in date_list: <NEW_LINE> <INDENT> data.append({**job, **skeleton, 'date_list': [d]}) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if len(date_list) > 1: <NEW_LINE> <INDENT> logger.debug("Running chunking for multiple days, but without date isolation. " "Your workers might feel bad.") <NEW_LINE> <DEDENT> data.append({**job, **skeleton, 'date_list': date_list}) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> logger.debug(f"No `period` chunking requested in job {job}") <NEW_LINE> data.append({**job, **skeleton}) <NEW_LINE> <DEDENT> return data
There is a support for multiple not nested parameters to chunk. Dates is one very specific of them.
625941c0aad79263cf3909ab
def AERONETtimeProcess(data, freq = 'day', window = False, **kwargs): <NEW_LINE> <INDENT> timedf = pd.DataFrame() <NEW_LINE> timedf['year'] = data.dateTimeLocal.dt.year <NEW_LINE> timedf['YYMM'] = data.dateTimeLocal.dt.to_period('M') <NEW_LINE> timedf['date'] = data.dateTimeLocal.dt.to_period('D') <NEW_LINE> timedf['time'] = data.dateTimeLocal.dt.time <NEW_LINE> timedf['hour'] = data.dateTimeLocal.dt.to_period('H') <NEW_LINE> if ~window: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> if window: <NEW_LINE> <INDENT> span = kwargs['span'] <NEW_LINE> starttime = pd.to_datetime(span[0], format = '%H:%M:%S') <NEW_LINE> endtime = pd.to_datetime(span[1], format = '%H:%M:%S') <NEW_LINE> mask = (timedf['time'] >= starttime.time()) & (timedf['time'] <= endtime.time()) <NEW_LINE> data = data[mask] <NEW_LINE> <DEDENT> if freq == 'original': <NEW_LINE> <INDENT> data_mean = data.copy() <NEW_LINE> data_std = None <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if freq == 'hour': <NEW_LINE> <INDENT> data_mean = data.groupby([timedf['hour'], 'Site']).mean() <NEW_LINE> data_std = data.groupby([timedf['hour'], 'Site']).std() <NEW_LINE> count = data.groupby([timedf['hour'], 'Site']).count() <NEW_LINE> <DEDENT> if freq == 'day': <NEW_LINE> <INDENT> data_mean = data.groupby([timedf['date'], 'Site']).mean() <NEW_LINE> data_std = data.groupby([timedf['date'], 'Site']).std() <NEW_LINE> count = data.groupby([timedf['date'], 'Site']).count() <NEW_LINE> <DEDENT> if freq == 'month': <NEW_LINE> <INDENT> data_mean = data.groupby([timedf['YYMM'], 'Site']).mean() <NEW_LINE> data_std = data.groupby([timedf['YYMM'], 'Site']).std() <NEW_LINE> count = data.groupby([timedf['YYMM'], 'Site']).count() <NEW_LINE> <DEDENT> data_mean.reset_index(inplace = True) <NEW_LINE> data_std.reset_index(inplace = True) <NEW_LINE> data_mean['num'] = count.values[:, 0] <NEW_LINE> data_std['num'] = count.values[:, 0] <NEW_LINE> data_mean['dateTime'] = pd.to_datetime(data_mean['timeStamp'] * 10 ** 9) <NEW_LINE> 
data_std['dateTime'] = pd.to_datetime(data_mean['timeStamp'] * 10 ** 9) <NEW_LINE> data_mean['dateTimeLocal'] = pd.to_datetime(data_mean['timeStampLocal'] * 10 ** 9) <NEW_LINE> data_std['dateTimeLocal'] = pd.to_datetime(data_mean['timeStampLocal'] * 10 ** 9) <NEW_LINE> <DEDENT> return data_mean, data_std
Function to process AERONET. data: outputs of AERONETinversion or AERONETdirectSun. freq: time processing frequency, chosse from 'original', 'month', 'day' and 'hour'. window: whether use a time window in case of, e.g, satellite overpass period. If False, then use all records. span: if period is True, then specify the time window in '**kwarg', e.g. if the time window is 13 p.m -14 p.m., then specify as ['13:00:00', '14:00:00']. Note the time is LOCAL time, NOT the UTC. Return: Dataframes contain temporal mean and std of each AERONET site. @author: Sunji Last updated date: 2019-10-25
625941c0dc8b845886cb54a1
def batchnorm_backward(dout, cache): <NEW_LINE> <INDENT> dx, dgamma, dbeta = None, None, None <NEW_LINE> (x, x_mean, x_var, x_norm, gamma, beta, eps) = cache <NEW_LINE> N = x.shape[0] <NEW_LINE> dx_norm = dout * gamma <NEW_LINE> dx_var = np.sum(dx_norm * (x - x_mean) * -0.5 * ((x_var + eps) ** -1.5), axis=0) <NEW_LINE> dx_mean = np.sum(dx_norm * -1. / np.sqrt(x_var + eps), axis=0) + dx_var * -2. * np.sum(x - x_mean, axis=0) / N <NEW_LINE> dx = dx_norm / np.sqrt(x_var + eps) + dx_var * 2 * (x - x_mean) / N + dx_mean / N <NEW_LINE> dgamma = np.sum(dout * x_norm, axis=0) <NEW_LINE> dbeta = np.sum(dout, axis=0) <NEW_LINE> return dx, dgamma, dbeta
Backward pass for batch normalization. For this implementation, you should write out a computation graph for batch normalization on paper and propagate gradients backward through intermediate nodes. Inputs: - dout: Upstream derivatives, of shape (N, D) - cache: Variable of intermediates from batchnorm_forward. Returns a tuple of: - dx: Gradient with repsect to inputs x, of shape (N, D) - dgamma: Gradient with repsect to scale parameter gamma, of shape (D,) - dbeta: Gradient with repsect to shift parameter beta, of shape (D,)
625941c082261d6c526ab409
def reverse_sublist(vector, start, end): <NEW_LINE> <INDENT> endrange = math.ceil((end - start) / 2) <NEW_LINE> for i in range(0, endrange): <NEW_LINE> <INDENT> dest = end - i - 1 <NEW_LINE> vector[start + i], vector[dest] = vector[dest], vector[start + i]
Helper function to reverse the elements from start to end.
625941c021a7993f00bc7c59
def function_format_example(a,b,c): <NEW_LINE> <INDENT> pass
Name of creator: Noam. Please change comments and functions names to english :) Function description. Args: a: description. b: description. c: description. Returns: Description.
625941c08a349b6b435e80e1
def decoding(self, information: List[int]) -> List[int]: <NEW_LINE> <INDENT> log.info("Decode package {0} by convolution decoder".format(information)) <NEW_LINE> info_divided_into_steps = self.__divide_into_steps(information) <NEW_LINE> last_step = [[0, []]] + [[self.__MAX_STEPS, []] for x in range(2 ** self._countRegisters - 1)] <NEW_LINE> for iterator in info_divided_into_steps: <NEW_LINE> <INDENT> now_step = [[self.__MAX_STEPS, []] for x in range(2 ** self._countRegisters)] <NEW_LINE> number: int = 0 <NEW_LINE> for info_about_vertex in last_step: <NEW_LINE> <INDENT> vertex_step: int = self._graph[number][0][0] <NEW_LINE> distance: int = get_hamming_distance(iterator, self._graph[number][0][1]) <NEW_LINE> if now_step[vertex_step][0] > last_step[number][0] + distance: <NEW_LINE> <INDENT> now_step[vertex_step] = [info_about_vertex[0] + distance, info_about_vertex[1] + [0]] <NEW_LINE> <DEDENT> vertex_step: int = self._graph[number][1][0] <NEW_LINE> distance: int = get_hamming_distance(iterator, self._graph[number][1][1]) <NEW_LINE> if now_step[vertex_step][0] > last_step[number][0] + distance: <NEW_LINE> <INDENT> now_step[vertex_step] = [info_about_vertex[0] + distance, info_about_vertex[1] + [1]] <NEW_LINE> <DEDENT> number += 1 <NEW_LINE> <DEDENT> last_step = now_step <NEW_LINE> <DEDENT> min_answer: list = [] <NEW_LINE> min_cost: int = self.__MAX_STEPS <NEW_LINE> for iterator in last_step: <NEW_LINE> <INDENT> if min_cost > iterator[0]: <NEW_LINE> <INDENT> min_cost = iterator[0] <NEW_LINE> min_answer = iterator[1] <NEW_LINE> <DEDENT> <DEDENT> return min_answer
Decoding of convolution coder :param information: List[int] :return: List[int]
625941c0d18da76e23532440
def testMultiAssortedSpaceSearch(self): <NEW_LINE> <INDENT> form = self.form <NEW_LINE> form.enter_value('accids', 'MGI:5812656 J:151466 10.1534/genetics.114.161455 GO_REF:0000033') <NEW_LINE> form.click_search() <NEW_LINE> table_element = self.driver.find_element(By.ID, "resultsTable") <NEW_LINE> table = Table(table_element) <NEW_LINE> jnum_cells = table.get_column_cells(2) <NEW_LINE> jnums = iterate.getTextAsList(jnum_cells) <NEW_LINE> print(jnums) <NEW_LINE> self.assertEqual(jnums, ['','J:237402', 'J:212979', 'J:161428', 'J:151466'])
@Status Tests that a list of multiple space separated assorted IDs returns the correct results @See MBIB-search-3
625941c076e4537e8c3515de
def remove(self, *args): <NEW_LINE> <INDENT> val = _libsbol.OwnedLocation_remove(self, *args) <NEW_LINE> self.thisown = True <NEW_LINE> return val
Remove an object from the list of objects and destroy it. Parameters ---------- * `uri` : The identity of the object to be destroyed. This can be a displayId of the object or a full URI may be provided. * `index` : A numerical index for the object.
625941c0596a897236089a30
def rmon_alarm_entry_alarm_owner(self, **kwargs): <NEW_LINE> <INDENT> config = ET.Element("config") <NEW_LINE> rmon = ET.SubElement(config, "rmon", xmlns="urn:brocade.com:mgmt:brocade-rmon") <NEW_LINE> if kwargs.pop('delete_rmon', False) is True: <NEW_LINE> <INDENT> delete_rmon = config.find('.//*rmon') <NEW_LINE> delete_rmon.set('operation', 'delete') <NEW_LINE> <DEDENT> alarm_entry = ET.SubElement(rmon, "alarm-entry") <NEW_LINE> if kwargs.pop('delete_alarm_entry', False) is True: <NEW_LINE> <INDENT> delete_alarm_entry = config.find('.//*alarm-entry') <NEW_LINE> delete_alarm_entry.set('operation', 'delete') <NEW_LINE> <DEDENT> alarm_index_key = ET.SubElement(alarm_entry, "alarm-index") <NEW_LINE> alarm_index_key.text = kwargs.pop('alarm_index') <NEW_LINE> if kwargs.pop('delete_alarm_index', False) is True: <NEW_LINE> <INDENT> delete_alarm_index = config.find('.//*alarm-index') <NEW_LINE> delete_alarm_index.set('operation', 'delete') <NEW_LINE> <DEDENT> alarm_owner = ET.SubElement(alarm_entry, "alarm-owner") <NEW_LINE> if kwargs.pop('delete_alarm_owner', False) is True: <NEW_LINE> <INDENT> delete_alarm_owner = config.find('.//*alarm-owner') <NEW_LINE> delete_alarm_owner.set('operation', 'delete') <NEW_LINE> <DEDENT> alarm_owner.text = kwargs.pop('alarm_owner') <NEW_LINE> callback = kwargs.pop('callback', self._callback) <NEW_LINE> return callback(config)
Auto Generated Code
625941c0a05bb46b383ec791
def _build_panel(self, i, j): <NEW_LINE> <INDENT> dy = self.wing.planform_wingspan / self.mesh.n <NEW_LINE> y_A = -self.wing.planform_wingspan / 2 + j * dy <NEW_LINE> y_B = y_A + dy <NEW_LINE> y_C, y_D = y_A, y_B <NEW_LINE> y_pc = y_A + dy / 2 <NEW_LINE> c_AC, c_BD, c_pc = [ get_chord_at_section( y, root_chord=self.wing.root_chord, tip_chord=self.wing.tip_chord, span=self.wing.planform_wingspan, ) for y in (y_A, y_B, y_pc) ] <NEW_LINE> dx_AC, dx_BD, dx_pc = [c / self.mesh.m for c in (c_AC, c_BD, c_pc)] <NEW_LINE> r, s, q = [ get_quarter_chord_x(y, cr=self.wing.root_chord, sweep=self.wing.sweep_angle) for y in (y_A, y_B, y_pc) ] <NEW_LINE> x_A = (r - c_AC / 4) + i * dx_AC <NEW_LINE> x_B = (s - c_BD / 4) + i * dx_BD <NEW_LINE> x_C = x_A + dx_AC <NEW_LINE> x_D = x_B + dx_BD <NEW_LINE> x_pc = (q - c_pc / 4) + (i + 3 / 4) * dx_pc <NEW_LINE> x = np.array([x_A, x_B, x_D, x_C]) <NEW_LINE> y = np.array([y_A, y_B, y_D, y_C]) <NEW_LINE> z = np.tan(self.wing.dihedral_angle) * np.abs(y) <NEW_LINE> panel = np.stack((x, y, z), axis=-1) <NEW_LINE> z_pc = np.tan(self.wing.dihedral_angle) * np.abs(y_pc) <NEW_LINE> pc = np.array([x_pc, y_pc, z_pc]) <NEW_LINE> return panel, pc
Build a wing panel indexed by its chord and spanwise indices. Parameters ---------- i : int Panel chordwise index. j : int Panel spanwise index. Returns ------- panel : np.ndarray, shape (4, 3) Array containing the (x,y,z) coordinates of the (`i`, `j`)-th panel's vertices (sorted A-B-D-C). pc : np.ndarray, shape (3, ) (x,y,z) coordinates of the (`i`, `j`)-th panel's collocation point.
625941c038b623060ff0ad5c
def getBlake2HashedString(self, textToHash, digestSize=DigestSize.sz32): <NEW_LINE> <INDENT> if len(textToHash) > 0: <NEW_LINE> <INDENT> if digestSize == DigestSize.sz64: <NEW_LINE> <INDENT> return hashlib.blake2b(str.encode(textToHash), digest_size=64).hexdigest() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return hashlib.blake2s(str.encode(textToHash), digest_size=32).hexdigest() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> return None
Returns a Blake2 digest using text passed to the method. @param textToHash: input text to output as a hash digest @type textToHas: String @param digestSize = specifies the digest size to use for platform dependency. @type digestSize: Enumerator @return String
625941c067a9b606de4a7e28
def replace_namespaced_pod(self, body, namespace, name, **kwargs): <NEW_LINE> <INDENT> all_params = ['body', 'namespace', 'name', 'pretty'] <NEW_LINE> all_params.append('callback') <NEW_LINE> params = locals() <NEW_LINE> for key, val in iteritems(params['kwargs']): <NEW_LINE> <INDENT> if key not in all_params: <NEW_LINE> <INDENT> raise TypeError( "Got an unexpected keyword argument '%s'" " to method replace_namespaced_pod" % key ) <NEW_LINE> <DEDENT> params[key] = val <NEW_LINE> <DEDENT> del params['kwargs'] <NEW_LINE> if ('body' not in params) or (params['body'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_pod`") <NEW_LINE> <DEDENT> if ('namespace' not in params) or (params['namespace'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_pod`") <NEW_LINE> <DEDENT> if ('name' not in params) or (params['name'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_pod`") <NEW_LINE> <DEDENT> resource_path = '/api/v1/namespaces/{namespace}/pods/{name}'.replace('{format}', 'json') <NEW_LINE> path_params = {} <NEW_LINE> if 'namespace' in params: <NEW_LINE> <INDENT> path_params['namespace'] = params['namespace'] <NEW_LINE> <DEDENT> if 'name' in params: <NEW_LINE> <INDENT> path_params['name'] = params['name'] <NEW_LINE> <DEDENT> query_params = {} <NEW_LINE> if 'pretty' in params: <NEW_LINE> <INDENT> query_params['pretty'] = params['pretty'] <NEW_LINE> <DEDENT> header_params = {} <NEW_LINE> form_params = [] <NEW_LINE> local_var_files = {} <NEW_LINE> body_params = None <NEW_LINE> if 'body' in params: <NEW_LINE> <INDENT> body_params = params['body'] <NEW_LINE> <DEDENT> header_params['Accept'] = self.api_client. 
select_header_accept(['application/json', 'application/yaml']) <NEW_LINE> if not header_params['Accept']: <NEW_LINE> <INDENT> del header_params['Accept'] <NEW_LINE> <DEDENT> header_params['Content-Type'] = self.api_client. select_header_content_type(['*/*']) <NEW_LINE> auth_settings = [] <NEW_LINE> response = self.api_client.call_api(resource_path, 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Pod', auth_settings=auth_settings, callback=params.get('callback')) <NEW_LINE> return response
replace the specified Pod This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.replace_namespaced_pod(body, namespace, name, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param V1Pod body: (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str name: name of the Pod (required) :param str pretty: If 'true', then the output is pretty printed. :return: V1Pod If the method is called asynchronously, returns the request thread.
625941c01d351010ab855a8a
def test_comparison_on_different_application_data(self): <NEW_LINE> <INDENT> a = attributes.ApplicationSpecificInformation( application_data="test_data_1" ) <NEW_LINE> b = attributes.ApplicationSpecificInformation( application_data="test_data_2" ) <NEW_LINE> self.assertFalse(a == b) <NEW_LINE> self.assertFalse(b == a) <NEW_LINE> self.assertTrue(a != b) <NEW_LINE> self.assertTrue(b != a)
Test that the equality/inequality operators return False/True when comparing two ApplicationSpecificInformation objects with different data.
625941c0099cdd3c635f0bc9
def _reduce_anchor_funcs(funcs): <NEW_LINE> <INDENT> _validate_funcs(funcs) <NEW_LINE> if isinstance(funcs, Attr): <NEW_LINE> <INDENT> return funcs <NEW_LINE> <DEDENT> attr = Attr() <NEW_LINE> if isinstance(funcs[0], Attr): <NEW_LINE> <INDENT> attr = funcs[0] <NEW_LINE> funcs = funcs[1:] <NEW_LINE> <DEDENT> for func in funcs: <NEW_LINE> <INDENT> add_to_attr(attr, func) <NEW_LINE> <DEDENT> return attr
Merge a list of AnchorFuncs into an Attr. If the first element is an Attr, then we merge into that Attr.
625941c0dd821e528d63b118
def reverse(self, x): <NEW_LINE> <INDENT> v = -x if x < 0 else x <NEW_LINE> lst = reversed([int(s) for s in str(v)]) <NEW_LINE> ans = reduce(lambda a, b: a * 10 + b, lst, 0) <NEW_LINE> ans = -ans if x < 0 else ans <NEW_LINE> if ans < int_max_neg or ans > int_max: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return ans
:type x: int :rtype: int
625941c01f037a2d8b94616c
def config_callback(self, config, level): <NEW_LINE> <INDENT> self.pause_time = config['pause_time'] <NEW_LINE> self.world.sigma_m_sq = config['sigma_m_sq'] <NEW_LINE> self.world.sigma_z_sq = config['sigma_z_sq'] <NEW_LINE> return config
Get the pause_time, movement noise, and measurement noise
625941c08da39b475bd64ede
def make_handler(include_paths, name): <NEW_LINE> <INDENT> if name == DEFAULT_HANDLER_NAME: <NEW_LINE> <INDENT> raise ValueError('Handler name "{}" is reserved for internal use'.format(name)) <NEW_LINE> <DEDENT> return _make_handler(include_paths, name)
Returns an include handler and name (for use with make_resolver) that uses the given include paths.
625941c0167d2b6e31218b03
def tree_to_xml(tree): <NEW_LINE> <INDENT> if IsString(tree): <NEW_LINE> <INDENT> return tree <NEW_LINE> <DEDENT> if any(map(IsString, tree)) and len(tree) > 1: <NEW_LINE> <INDENT> raise ValueError('Leaves should be attached to preterminals: {0}'.format(tree)) <NEW_LINE> <DEDENT> return '<tree label="{0}">{1}</tree>'.format( tree.label(), ''.join(map(tree_to_xml, tree)))
Returns string XML representation of Tree, in Moses format. E.g.: (NP (DT the) (NN house)) is converted to (without newlines): <tree label="NP"> <tree label="DT">the</tree> <tree label="NN">house</tree> </tree>
625941c0cc0a2c11143dcdfe
def info(self, sha256): <NEW_LINE> <INDENT> return self._connection.get(api_path_by_module(self, sha256))
Return info for the the file with the given sha256. Required: sha256 : File key (string) Throws a Client exception if the file does not exist.
625941c05fdd1c0f98dc01a0
def IshiyamaAlpha(M, Mcrit): <NEW_LINE> <INDENT> alpha = -0.123*np.log10(M/Mcrit)+1.461 <NEW_LINE> if isinstance(alpha, float): <NEW_LINE> <INDENT> alpha = np.max([1.0, alpha]) <NEW_LINE> alpha = np.min([1.461, alpha]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> alpha[alpha < 1] = 1. <NEW_LINE> alpha[alpha > 1.461] = 1.461 <NEW_LINE> <DEDENT> return alpha
Fitting function for alpha (parameter for modified NFW) from Ishiyama 2014: http://adsabs.harvard.edu/abs/2014ApJ...788...27I :param M: Virial mass of a halo in solar masses. :return: alpha (inner slope)
625941c096565a6dacc8f639
def box_add(self, box_name, box_url): <NEW_LINE> <INDENT> command = "box add '{}' '{}'".format(box_name, box_url) <NEW_LINE> self._call_vagrant_command(command)
Adds a box with given name, from given url.
625941c01f5feb6acb0c4ac1
def toggle_bit(x, k): <NEW_LINE> <INDENT> return x ^ (1 << k)
Return x with the k-th bit toggled.
625941c07b180e01f3dc476f
def process_died(self, process_name, exit_code): <NEW_LINE> <INDENT> pass
Notifies listener that process has died. This callback only occurs for processes that die during normal process monitor execution -- processes that are forcibly killed during L{roslaunch.pmon.ProcessMonitor} shutdown are not reported. @param process_name: name of process @type process_name: str @param exit_code int: exit code of process. If None, it means that L{roslaunch.pmon.ProcessMonitor} was unable to determine an exit code. @type exit_code: int
625941c056b00c62f0f145c6
def testDoubleListener(self): <NEW_LINE> <INDENT> context = self.framework.get_bundle_context() <NEW_LINE> assert isinstance(context, BundleContext) <NEW_LINE> self.assertTrue(context.add_service_listener(self), "Can't register the service listener") <NEW_LINE> log_off() <NEW_LINE> self.assertFalse(context.add_service_listener(self), "Service listener registered twice") <NEW_LINE> log_on() <NEW_LINE> self.assertTrue(context.remove_service_listener(self), "Can't unregister the service listener") <NEW_LINE> log_off() <NEW_LINE> self.assertFalse(context.remove_service_listener(self), "Service listener unregistered twice") <NEW_LINE> log_on()
Tests double registration / unregistration
625941c096565a6dacc8f63a
def printTable(self, table_format="grid"): <NEW_LINE> <INDENT> header, values = self.exportData() <NEW_LINE> print(tabulate([values], headers=header, tablefmt=table_format))
Print table with trade data.
625941c010dbd63aa1bd2b12
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params, method, pickler=None): <NEW_LINE> <INDENT> fit_params = fit_params if fit_params is not None else {} <NEW_LINE> fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) <NEW_LINE> X_train, y_train = _safe_split(estimator, X, y, train) <NEW_LINE> X_test, _ = _safe_split(estimator, X, y, test, train) <NEW_LINE> if y_train is None: <NEW_LINE> <INDENT> estimator.fit(X_train, **fit_params) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> estimator.fit(X_train, y_train, **fit_params) <NEW_LINE> <DEDENT> func = getattr(estimator, method) <NEW_LINE> predictions = func(X_test) <NEW_LINE> if method in ['decision_function', 'predict_proba', 'predict_log_proba'] and is_classifier(estimator): <NEW_LINE> <INDENT> n_classes = len(set(y)) <NEW_LINE> if n_classes != len(estimator.classes_): <NEW_LINE> <INDENT> recommendation = ( 'To fix this, use a cross-validation ' 'technique resulting in properly ' 'stratified folds') <NEW_LINE> warnings.warn('Number of classes in training fold ({}) does ' 'not match total number of classes ({}). ' 'Results may not be appropriate for your use case. ' '{}'.format(len(estimator.classes_), n_classes, recommendation), RuntimeWarning) <NEW_LINE> if method == 'decision_function': <NEW_LINE> <INDENT> if (predictions.ndim == 2 and predictions.shape[1] != len(estimator.classes_)): <NEW_LINE> <INDENT> raise ValueError('Output shape {} of {} does not match ' 'number of classes ({}) in fold. ' 'Irregular decision_function outputs ' 'are not currently supported by ' 'cross_val_predict'.format( predictions.shape, method, len(estimator.classes_), recommendation)) <NEW_LINE> <DEDENT> if len(estimator.classes_) <= 2: <NEW_LINE> <INDENT> raise ValueError('Only {} class/es in training fold, this ' 'is not supported for decision_function ' 'with imbalanced folds. 
{}'.format( len(estimator.classes_), recommendation)) <NEW_LINE> <DEDENT> <DEDENT> float_min = np.finfo(predictions.dtype).min <NEW_LINE> default_values = {'decision_function': float_min, 'predict_log_proba': float_min, 'predict_proba': 0} <NEW_LINE> predictions_for_all_classes = np.full((_num_samples(predictions), n_classes), default_values[method]) <NEW_LINE> predictions_for_all_classes[:, estimator.classes_] = predictions <NEW_LINE> predictions = predictions_for_all_classes <NEW_LINE> <DEDENT> <DEDENT> if pickler is not None: <NEW_LINE> <INDENT> predictions = pickler.pickle_data(predictions) <NEW_LINE> <DEDENT> return predictions, test
Please see sklearn for documenation This has only been modified so binned regressors can return probabilites and predictions can be pickled
625941c0d486a94d0b98e0b3
def delete_rows(self, row_indices): <NEW_LINE> <INDENT> Matrix.delete_rows(self, row_indices) <NEW_LINE> self.validate_filters()
delete rows too expensive to do so
625941c0d99f1b3c44c67502
def __init__(self, *args): <NEW_LINE> <INDENT> this = _AriaPy.new_ArRobotPacketSender(*args) <NEW_LINE> try: self.this.append(this) <NEW_LINE> except: self.this = this
__init__(self, unsigned char sync1 = 0xfa, unsigned char sync2 = 0xfb) -> ArRobotPacketSender __init__(self, unsigned char sync1 = 0xfa) -> ArRobotPacketSender __init__(self) -> ArRobotPacketSender __init__(self, ArDeviceConnection deviceConnection, unsigned char sync1 = 0xfa, unsigned char sync2 = 0xfb) -> ArRobotPacketSender __init__(self, ArDeviceConnection deviceConnection, unsigned char sync1 = 0xfa) -> ArRobotPacketSender __init__(self, ArDeviceConnection deviceConnection) -> ArRobotPacketSender __init__(self, ArDeviceConnection deviceConnection, unsigned char sync1, unsigned char sync2, bool tracking, char trackingLogName) -> ArRobotPacketSender
625941c063b5f9789fde7053
@login_required(login_url='/account/login/') <NEW_LINE> def edit_post(request, post_id): <NEW_LINE> <INDENT> post = get_object_or_404(Post, pk=post_id) <NEW_LINE> if request.user.is_staff: <NEW_LINE> <INDENT> if request.method == "POST": <NEW_LINE> <INDENT> form = BlogPostForm(request.POST, request.FILES, instance=post) <NEW_LINE> if form.is_valid(): <NEW_LINE> <INDENT> post = form.save(commit=False) <NEW_LINE> post.author = request.user <NEW_LINE> post.published_date = timezone.now() <NEW_LINE> post.save() <NEW_LINE> return redirect(post_detail, post.pk) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> form = BlogPostForm(instance=post) <NEW_LINE> <DEDENT> args = { 'form': form, 'form_title': 'Edit ' + post.title } <NEW_LINE> return render(request, 'blog/blogpostform.html', args) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return redirect(reverse('blog'))
Submit or render edit blog post form
625941c0e5267d203edcdc0d
def create_display(panel_size: tuple, display_size: tuple): <NEW_LINE> <INDENT> ret = {} <NEW_LINE> addr = 0 <NEW_LINE> cord = [0, 0] <NEW_LINE> for x in range(0, display_size[0], panel_size[0]): <NEW_LINE> <INDENT> for y in range(0, display_size[1], panel_size[1]): <NEW_LINE> <INDENT> cord = [x, y] <NEW_LINE> ret[addr] = (cord, panel_size) <NEW_LINE> addr += 1 <NEW_LINE> <DEDENT> <DEDENT> return ret
Utility to create panel dict for a given panel_size and display_size Keyword arguments: panel_size -- (w, h) of panel display_size -- (w, h) of display (should be factor of panel_size)
625941c08da39b475bd64edf
def test_ap_wpa2_igtk_initial_rsc_aes_128_cmac(dev, apdev): <NEW_LINE> <INDENT> run_ap_wpa2_igtk_initial_rsc(dev, apdev, "AES-128-CMAC")
Initial management group cipher RSC (AES-128-CMAC)
625941c0851cf427c661a47f
def test_timeslice(self): <NEW_LINE> <INDENT> tensor = tf.placeholder(dtype=tf.float32, shape=[None, None, None]) <NEW_LINE> indices = tf.placeholder(dtype=tf.int32, shape=[None]) <NEW_LINE> outputs = ops.timeslice(tensor, indices) <NEW_LINE> tensor_actual = np.array([[[0.01, 0.01, 0.01], [0.02, 0.02, 0.02], [0.03, 0.03, 0.03], [0.04, 0.04, 0.04]], [[0.1, 0.1, 0.1], [0.2, 0.2, 0.2], [23, 23, 23], [23, 23, 23]]], dtype=np.float32) <NEW_LINE> indices_actual = np.array([3, 1], dtype=np.int32) <NEW_LINE> outputs_expected = np.array([[0.04, 0.04, 0.04], [0.2, 0.2, 0.2]], dtype=np.float32) <NEW_LINE> with tf.Session() as sess: <NEW_LINE> <INDENT> sess.run(tf.global_variables_initializer()) <NEW_LINE> outputs_actual = sess.run(outputs, {tensor: tensor_actual, indices: indices_actual}) <NEW_LINE> <DEDENT> self.assertAllClose(outputs_expected, outputs_actual)
Test.
625941c007f4c71912b113ee
@app.route("/users/password", methods=["GET", "POST"]) <NEW_LINE> def users_change_password(): <NEW_LINE> <INDENT> form = ChangePasswordForm() <NEW_LINE> if not g.user: <NEW_LINE> <INDENT> flash("Access unauthorized.", "danger") <NEW_LINE> return redirect("/") <NEW_LINE> <DEDENT> if form.validate_on_submit(): <NEW_LINE> <INDENT> if User.authenticate(g.user.username, form.password.data): <NEW_LINE> <INDENT> print(form.new_password.data) <NEW_LINE> print(form.new_password_check.data) <NEW_LINE> if form.new_password.data == form.new_password_check.data: <NEW_LINE> <INDENT> g.user.change_password(form.new_password.data) <NEW_LINE> db.session.commit() <NEW_LINE> flash("Password changed succesfully", "success") <NEW_LINE> return redirect(url_for('users_show', user_id=g.user.id)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> form.new_password.errors.append('New passwords do not match') <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> form.password.errors.append('Incorrect Password') <NEW_LINE> <DEDENT> <DEDENT> return render_template("users/change-password.html", form=form)
Allow a user to change their password
625941c0bde94217f3682d61
def messageOFF(): <NEW_LINE> <INDENT> if MySet.sendMessageOFF: <NEW_LINE> <INDENT> Parent.SendStreamMessage(MySet.messageOFF) <NEW_LINE> <DEDENT> if MySet.DsendMessageOFF: <NEW_LINE> <INDENT> url = "https://www.twitch.tv/" + Parent.GetChannelName() <NEW_LINE> message = MySet.DmessageOFF.format(url) <NEW_LINE> Parent.SendDiscordMessage(message)
Send message when going live if enabled
625941c0a8370b771705280e
def read_six_etf_list() -> pandas.DataFrame: <NEW_LINE> <INDENT> return pandas.read_csv(os.path.join(data_dir(), 'six_etf.csv'), header=0, index_col=0, sep=',', encoding='utf-8')
Read available ETFs traded on the SIX stock exchange
625941c03cc13d1c6d3c72e9
def sample_user(email='test@londonappdev.com', password='testpass'): <NEW_LINE> <INDENT> return get_user_model().objects.create_user(email, password)
Create a sample user to help with testing tag model
625941c045492302aab5e22f
def alpha_index_series() -> pd.Series: <NEW_LINE> <INDENT> return pd.Series(list(range(1, 27)), index=[chr(x) for x in range(97, 123)])
Create a Series with values 1, 2, ... 25, 26 of type int64 and add an index with values a, b, ... y, z so index 'a'=1, 'b'=2 ... 'y'=25, 'z'=26 Don't worry about the series name.
625941c0baa26c4b54cb1090
def __init__( self, graph: dict[V, Any] | None = None, *, is_directed: bool = True, weight: float = 1, **kwargs: Any, ) -> None: <NEW_LINE> <INDENT> self.is_directed = is_directed <NEW_LINE> self._graph = {} <NEW_LINE> if graph is not None: <NEW_LINE> <INDENT> for u in graph: <NEW_LINE> <INDENT> self.add_node(u) <NEW_LINE> <DEDENT> for u in graph: <NEW_LINE> <INDENT> for v in graph[u]: <NEW_LINE> <INDENT> if v not in self._graph: <NEW_LINE> <INDENT> self.add_node(v) <NEW_LINE> <DEDENT> if isinstance(graph[u], dict): <NEW_LINE> <INDENT> edge = graph[u][v] <NEW_LINE> if isinstance(edge, Edge): <NEW_LINE> <INDENT> self.add_edge(**edge) <NEW_LINE> <DEDENT> elif isinstance(edge, dict): <NEW_LINE> <INDENT> self.add_edge(u, v, **edge) <NEW_LINE> <DEDENT> elif isinstance(edge, (int, float)): <NEW_LINE> <INDENT> self.add_edge(u, v, weight=edge, **kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError(f"{edge} is not a supported Edge type.") <NEW_LINE> <DEDENT> <DEDENT> elif isinstance(graph[u], (list, tuple, set, frozenset)): <NEW_LINE> <INDENT> self.add_edge(u, v, weight) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise TypeError(f"{graph[u]} is not a supported Edge mapping.")
Default constructor assumes an adjacency list representation.
625941c0be383301e01b53f7
def get(self, **kwargs): <NEW_LINE> <INDENT> if(self.tier in self.master_plus): <NEW_LINE> <INDENT> req = self.base + self.platform + '/tft/league/v1/' + self.tier <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> req = self.base + self.platform + self.tier + self.division <NEW_LINE> <DEDENT> res = requests.get(req, headers=self.api_key) <NEW_LINE> self.data = res.json()
Retrieves current players by platform, tier and division
625941c08c3a873295158326
def reportMatch(winner, loser): <NEW_LINE> <INDENT> db, cur = connect() <NEW_LINE> cur.execute( "INSERT INTO Matches (winner, loser) VALUES (%s, %s)", (winner, loser)) <NEW_LINE> db.commit() <NEW_LINE> cur.close() <NEW_LINE> db.close()
Records the outcome of a single match between two players. Args: winner: the id number of the player who won loser: the id number of the player who lost
625941c0f9cc0f698b14056b
def test_icon_png(self): <NEW_LINE> <INDENT> path = ':/plugins/RFTools/icon.png' <NEW_LINE> icon = QIcon(path) <NEW_LINE> self.assertFalse(icon.isNull())
Test we can click OK.
625941c091af0d3eaac9b984
def mu_score(community, environment=None, min_mol_weight=False, min_growth=0.1, max_uptake=10.0, abstol=1e-6, validate=False, n_solutions=100, pool_gap=0.5, verbose=True): <NEW_LINE> <INDENT> if environment: <NEW_LINE> <INDENT> environment.apply(community.merged, inplace=True, warning=False) <NEW_LINE> <DEDENT> max_uptake = max_uptake * len(community.organisms) <NEW_LINE> scores = {} <NEW_LINE> solver = solver_instance(community.merged) <NEW_LINE> for org_id in community.organisms: <NEW_LINE> <INDENT> exchange_rxns = community.organisms_exchange_reactions[org_id] <NEW_LINE> biomass_reaction = community.organisms_biomass_reactions[org_id] <NEW_LINE> community.merged.biomass_reaction = biomass_reaction <NEW_LINE> medium_list, sols = minimal_medium(community.merged, exchange_reactions=list(exchange_rxns.keys()), min_mass_weight=min_mol_weight, min_growth=min_growth, n_solutions=n_solutions, max_uptake=max_uptake, validate=validate, abstol=abstol, use_pool=True, pool_gap=pool_gap, solver=solver, warnings=False) <NEW_LINE> if medium_list: <NEW_LINE> <INDENT> counter = Counter(chain(*medium_list)) <NEW_LINE> scores[org_id] = {cnm.original_metabolite: counter[ex] / len(medium_list) for ex, cnm in exchange_rxns.items()} <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if verbose: <NEW_LINE> <INDENT> warn('MUS: Failed to find a minimal growth medium for ' + org_id) <NEW_LINE> <DEDENT> scores[org_id] = None <NEW_LINE> <DEDENT> <DEDENT> return scores
Calculate frequency of metabolite requirement for species growth Zelezniak A. et al, Metabolic dependencies drive species co-occurrence in diverse microbial communities (PNAS 2015) Args: community (Community): microbial community environment (Environment): metabolic environment min_mol_weight (bool): Prefer smaller compounds (default: False) min_growth (float): minimum growth rate (default: 0.1) max_uptake (float): maximum uptake rate (default: 10) abstol (float): tolerance for detecting a non-zero exchange flux (default: 1e-6) validate (bool): validate solution using FBA (for debugging purposes, default: False) n_solutions (int): number of alternative solutions to calculate (default: 100) Returns: dict: Keys are organism names, values are dictionaries with metabolite frequencies dict: Extra information
625941c04527f215b584c3c8
def resize(self, size): <NEW_LINE> <INDENT> pass
void QByteArray.resize(int size)
625941c09c8ee82313fbb6e2
def add_status(self, date: time, stat: str): <NEW_LINE> <INDENT> self._status[date] = stat
Add new status statement to _status
625941c071ff763f4b5495f6
def test_GLSLESVersion_int(): <NEW_LINE> <INDENT> nt.eq_(int(glsl.Version('100')), 100)
generated_tests.modules.glsl.GLSLESVersion: int()
625941c0287bf620b61d39d3
def generate(self, node, index_functions=None): <NEW_LINE> <INDENT> if index_functions is None: <NEW_LINE> <INDENT> index_functions = [] <NEW_LINE> <DEDENT> x = sympy.MatrixSymbol("x", 3, 1) <NEW_LINE> expr = node(x) <NEW_LINE> out = self.edge_ce_ratio * self.edge_length * self.visit(expr) <NEW_LINE> index_vars = [] <NEW_LINE> for f in index_functions: <NEW_LINE> <INDENT> fk0 = sympy.Symbol(f"{f}k0") <NEW_LINE> fk1 = sympy.Symbol(f"{f}k1") <NEW_LINE> out = out.subs(f(self.x0), fk0) <NEW_LINE> out = out.subs(f(self.x1), fk1) <NEW_LINE> out = out.subs(f(x), 0.5 * (fk0 + fk1)) <NEW_LINE> index_vars.append([fk0, fk1]) <NEW_LINE> <DEDENT> out = out.subs(x, 0.5 * (self.x0 + self.x1)) <NEW_LINE> return out, index_vars
Entry point to this class.
625941c00a366e3fb873e786
def asdim(dimension): <NEW_LINE> <INDENT> if isinstance(dimension, Dimension): <NEW_LINE> <INDENT> return dimension <NEW_LINE> <DEDENT> elif isinstance(dimension, (tuple, dict, str)): <NEW_LINE> <INDENT> return Dimension(dimension) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError('%s type could not be interpreted as Dimension. ' 'Dimensions must be declared as a string, tuple, ' 'dictionary or Dimension type.')
Convert the input to a Dimension. Args: dimension: tuple, dict or string type to convert to Dimension Returns: A Dimension object constructed from the dimension spec. No copy is performed if the input is already a Dimension.
625941c00383005118ecf552
def isNumber(self, s): <NEW_LINE> <INDENT> s = s.strip() <NEW_LINE> numberSeen = False <NEW_LINE> eSeen = False <NEW_LINE> pointSeen = False <NEW_LINE> numberAfterE = False <NEW_LINE> for char in s: <NEW_LINE> <INDENT> if '0' <= char <= '9': <NEW_LINE> <INDENT> numberSeen = True <NEW_LINE> <DEDENT> elif char == '.': <NEW_LINE> <INDENT> if not numberSeen or pointSeen: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> pointSeen = True <NEW_LINE> <DEDENT> elif char == 'e': <NEW_LINE> <INDENT> if not numberSeen: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> eSeen = True <NEW_LINE> <DEDENT> elif char in ['+', '-']: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False
:type s: str :rtype: bool
625941c030bbd722463cbd32
def setSimpleWordsList(self,simplewords): <NEW_LINE> <INDENT> if isinstance(simplewords,list): <NEW_LINE> <INDENT> self.simple_words = simplewords <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError("A simple word list should be provided as list")
Simple word list for DaleChall calculator.
625941c0be8e80087fb20bb4
def __init__(self, fileobj=None, domain=DEFAULT_DOMAIN): <NEW_LINE> <INDENT> gettext.GNUTranslations.__init__(self, fp=fileobj) <NEW_LINE> self.files = self.files = [_f for _f in [getattr(fileobj, 'name', None)] if _f] <NEW_LINE> self.domain = domain <NEW_LINE> self._domains = {}
Initialize the translations catalog. :param fileobj: the file-like object the translation should be read from
625941c021bff66bcd6848c3
def testInvalidDgPsf(self): <NEW_LINE> <INDENT> sigma1, sigma2, b = 1, 0, 0 <NEW_LINE> afwDetect.createPsf("DoubleGaussian", self.ksize, self.ksize, sigma1, sigma2, b) <NEW_LINE> def badSigma1(): <NEW_LINE> <INDENT> sigma1 = 0 <NEW_LINE> afwDetect.createPsf("DoubleGaussian", self.ksize, self.ksize, sigma1, sigma2, b) <NEW_LINE> <DEDENT> utilsTests.assertRaisesLsstCpp(self, pexExceptions.DomainErrorException, badSigma1) <NEW_LINE> def badSigma2(): <NEW_LINE> <INDENT> sigma2, b = 0, 1 <NEW_LINE> afwDetect.createPsf("DoubleGaussian", self.ksize, self.ksize, sigma1, sigma2, b) <NEW_LINE> <DEDENT> utilsTests.assertRaisesLsstCpp(self, pexExceptions.DomainErrorException, badSigma2)
Test parameters of dgPsfs, both valid and not
625941c076d4e153a657ea9e
def attributes(self, pythonify: bool = False) -> Union[Dict, List[MISPAttribute]]: <NEW_LINE> <INDENT> r = self._prepare_request('GET', 'attributes/index') <NEW_LINE> attributes_r = self._check_json_response(r) <NEW_LINE> if not (self.global_pythonify or pythonify) or 'errors' in attributes_r: <NEW_LINE> <INDENT> return attributes_r <NEW_LINE> <DEDENT> to_return = [] <NEW_LINE> for attribute in attributes_r: <NEW_LINE> <INDENT> a = MISPAttribute() <NEW_LINE> a.from_dict(**attribute) <NEW_LINE> to_return.append(a) <NEW_LINE> <DEDENT> return to_return
Get all the attributes from the MISP instance :param pythonify: Returns a list of PyMISP Objects instead of the plain json output. Warning: it might use a lot of RAM
625941c05e10d32532c5ee95
def to_sequence(self, velocity=100, instrument=0, program=None, max_note_duration=None, qpm=120.0): <NEW_LINE> <INDENT> seconds_per_step = 60.0 / (self.steps_per_quarter * qpm) <NEW_LINE> sequence = self._to_sequence( seconds_per_step=seconds_per_step, velocity=velocity, instrument=instrument, program=program, max_note_duration=max_note_duration) <NEW_LINE> sequence.tempos.add(qpm=qpm) <NEW_LINE> return sequence
Converts the Performance to NoteSequence proto. Args: velocity: MIDI velocity to give each note. Between 1 and 127 (inclusive). If the performance contains velocity events, those will be used instead. instrument: MIDI instrument to give each note. program: MIDI program to give each note, or None to use the program associated with the Performance (or the default program if none exists). max_note_duration: Maximum note duration in seconds to allow. Notes longer than this will be truncated. If None, notes can be any length. qpm: The tempo to use, in quarter notes per minute. Returns: A NoteSequence proto.
625941c0f8510a7c17cf9669
def upgrade_skill(self, which): <NEW_LINE> <INDENT> self.base_skills[which] += 1
Increase a skill's level by one. :param which: which skill to improve. :type which: sw.const.skill.Skill
625941c0e76e3b2f99f3a77e
def dispose(self): <NEW_LINE> <INDENT> pass
Dispose the listener The implementations of the MetricsListener class should override this method in order to release the resources that the listener might hold.
625941c0d4950a0f3b08c2bf