code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def theaterChase(color, wait_ms=50, times=20):
    """Movie-marquee chase: light every third pixel, shifting the pattern each tick.

    color   -- pixel color to chase across the strip
    wait_ms -- delay between shifts, in milliseconds
    times   -- number of full 3-phase cycles to run
    """
    for _ in range(times):
        for offset in range(3):
            # Light every third pixel, starting at this phase offset.
            for base in range(0, STRIP.numPixels(), 3):
                STRIP.setPixelColor(base + offset, color)
            STRIP.show()
            time.sleep(wait_ms / 1000.0)
            # Turn those same pixels back off before the next phase.
            for base in range(0, STRIP.numPixels(), 3):
                STRIP.setPixelColor(base + offset, Color(0, 0, 0))
Function used to create the theater chase animation
625941c126068e7796caec51
def finally_(
    self, after: Callable[[Result[E, A]], Resource[R, E, Any]]
) -> Resource[R, E, A]:
    """Run `after` once self has been computed, preserving self's result.

    After having computed self, but before returning its result, execute
    the `after` resource creation unconditionally (like a `finally` clause).
    The value produced by `after` is discarded; only its failure can
    replace self's result (via result.sequence).
    """
    return self.attempt().flat_map(
        # r1: the Result of computing self; r2: the Result of the cleanup.
        lambda r1: after(r1)
        .attempt()
        .flat_map(lambda r2: from_result(result.sequence(r2, r1)))
    )
After having computed self, but before returning its result, execute the `after` Resource creation. This is extremely useful when you need to perform an action, unconditionally, at the end of a resource creation, without changing its result, like executing a lifted IO.
625941c1d10714528d5ffc56
def resnet_arg_scope(
        is_training=True,
        weight_decay=cfgs.WEIGHT_DECAY,
        batch_norm_decay=0.997,
        batch_norm_epsilon=1e-5,
        batch_norm_scale=True):
    """Build the slim arg_scope used for the ResNet backbone.

    By default BN is NOT trained here (batch size is too small), so both
    'is_training' and 'trainable' are hard-coded to False in the BN params;
    the `is_training` argument only controls conv-weight trainability.
    """
    batch_norm_params = {
        'is_training': False, 'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon, 'scale': batch_norm_scale,
        'trainable': False,
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }
    with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=slim.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc
In Default, we do not use BN to train resnet, since batch_size is too small. So is_training is False and trainable is False in the batch_norm params.
625941c121bff66bcd6848ca
def copy_if_newer(src, dest):
    """Copy a file from src to dest if src is newer than dest.

    Creates the destination directory when missing.  Returns True on
    success or False if there was a problem (a None argument or a
    non-existent source file).
    """
    if src is None or dest is None:
        return False
    dest_dir = os.path.dirname(dest)
    # Bug fix: os.path.dirname never returns None -- it returns '' for a
    # bare filename, so the old `dest_dir is None` check was dead and a
    # bare-filename dest would attempt mkdir_p('').  Only create the
    # directory when there actually is one.
    if dest_dir and not os.path.exists(dest_dir):
        mkdir_p(dest_dir)
    if not os.path.exists(src):
        return False
    srcmtime = os.path.getmtime(src)
    try:
        destmtime = os.path.getmtime(dest)
        # Copy only when src is more than 1s newer (filesystem mtime slack).
        if srcmtime - destmtime > 1:
            shutil.copy2(src, dest)
    except OSError:
        # dest does not exist yet -- always copy.
        shutil.copy2(src, dest)
    return True
Copy a file from src to dest if src is newer than dest. Returns True if success or False if there was a problem.
625941c197e22403b379cf0f
def search_template_files(dirname, fnames):
    """Yield full paths of the names in `fnames` (sorted) whose extension
    appears in the module-level `file_formats` collection."""
    for name in sorted(fnames):
        _, extension = os.path.splitext(name)
        if extension in file_formats:
            yield os.path.join(dirname, name)
Search for suitable files
625941c1ec188e330fd5a719
def for_user(self, user, model=None):
    """Returns a Favorite objects queryset for a given user, newest first.

    If `model` is provided (a model class or an "app_label.model" dotted
    string), only favorites targeting that model are returned.

    Usage:
        Favorite.objects.for_user(user)
        Favorite.objects.for_user(user, model=Song)
        Favorite.objects.for_user(user, model="music.song")
    """
    qs = self.get_queryset().filter(user=user)
    if model:
        # Accept either a model class or a dotted "app.model" string.
        if isinstance(model, str):
            model = get_model(*model.split("."))
        content_type = ContentType.objects.get_for_model(model)
        qs = qs.filter(target_content_type=content_type)
    return qs.order_by("-timestamp")
Returns a Favorite objects queryset for a given user. If a model param is provided, it returns only the favorited objects of that model class. Usage: Favorite.objects.for_user(user) Favorite.objects.for_user(user, model=Song) Favorite.objects.for_user(user, model="music.song")
625941c185dfad0860c3adcf
def _stop_aware_put(self, data):
    """Put `data` on the results queue without blocking indefinitely.

    Uses a timed put so the worker can periodically check the stop event
    and terminate gracefully instead of being stuck on Queue.put.

    Raises:
        WorkerTerminationRequested: when the stop event is set while the
            queue is full; expected to propagate up to WorkerThread.run.
    """
    while True:
        try:
            self._results_queue.put(data, block=True, timeout=IO_TIMEOUT_INTERVAL_S)
            return
        except queue.Full:
            pass  # timed out -- fall through to check the stop flag
        if self._stop_event.is_set():
            raise WorkerTerminationRequested()
This method is called to write the results to the results queue. We use `put` in a non-blocking way so we can gracefully terminate the worker thread without being stuck on Queue.put. The method raises WorkerTerminationRequested exception that should be passed through all the way up to WorkerThread.run which will gracefully terminate main worker loop
625941c1fbf16365ca6f6135
def bathy_along_track(Float, hpids, bathy_file=None):
    """Plot bathymetry (depth vs. along-track distance) for the given profiles.

    Float      -- float object exposing get_profiles/lon_start/lat_start/dist
    hpids      -- profile ids to select
    bathy_file -- optional path forwarded to sandwell.interp_track
    """
    # ret_idxs=True returns indices into the Float arrays for the hpids.
    __, idxs = Float.get_profiles(hpids, ret_idxs=True)
    lons = Float.lon_start[idxs]
    lats = Float.lat_start[idxs]
    dist = Float.dist[idxs]
    # Interpolate Sandwell bathymetry along the (lon, lat) track.
    bathy = sandwell.interp_track(lons, lats, bathy_file)
    plt.figure()
    plt.plot(dist, bathy)
    plt.xlabel('Distance (km)')
    plt.ylabel('Depth (m)')
TODO: Docstring...
625941c18da39b475bd64ee7
def validate(self):
    """Validate the message.

    Ensure all the required fields (the top-level fields plus every field
    flagged truthy in self._fields) are present and not None.

    Raises:
        MissingFieldError: for the first required field that is absent
            or None.
    """
    # Bug fix: in Python 3 `filter` returns an iterator, and
    # `self._top_fields + <filter object>` raises TypeError.  Build a
    # real list instead.
    required_fields = [name for name in self._fields if self._fields[name]]
    for field in self._top_fields + required_fields:
        # getattr with a default folds the old hasattr/== None dance
        # into one identity check.
        if getattr(self, field, None) is None:
            raise MissingFieldError(field)
Validate the message. Ensure all the required fields are present and not None.
625941c14527f215b584c3cf
def reset(self):
    """Override: return the buffer to its pristine, empty state."""
    # Zero the backing array in place, then clear the bookkeeping counters.
    self._x.fill(0)
    self._i0 = 0
    self._len = 0
Override.
625941c1d10714528d5ffc57
def login(request, *args, **kwargs):
    """Delegate to `django.contrib.auth.views.login`, adding an extra form.

    The extra form lets users log in with just an email address.  Rather
    than rewriting the django login code, we tap into the
    TemplateResponse.context_data and inject `other_form` there.
    """
    response = django_login_view(request, *args, **kwargs)
    if request.method == "POST" and request.POST.get("form") == "OTHER_LOGIN":
        # The POST was for the email-only form, so rebuild the standard
        # login form unbound (avoid showing its validation errors).
        response.context_data['form'] = response.context_data['form'].__class__(request)
        other_form = LoginForm(request.POST)
        if other_form.is_valid():
            other_form.save(request=request)
            messages.success(request, "Check your email! You have been sent the login link.")
            return redirect(request.get_full_path())
    else:
        other_form = LoginForm()
    if hasattr(response, "context_data"):
        # Redirect responses carry no context_data -- only template responses do.
        response.context_data['other_form'] = other_form
    return response
This delegates most of the work to `django.contrib.auth.views.login`, but we need to display an extra form, that allows users to login with just an email address. To do that without rewriting all the django login code, we tap into the TemplateResponse.context_data and add the extra form
625941c1e5267d203edcdc15
def graph_from_t7(net, graph, t7_file):
    """Loads a Torch network from a saved .t7 file into Tensorflow.

    :param net: Input to Torch network
    :param graph: Tensorflow graph that the network should be created as part of
    :param t7_file: Path to t7 file to use
    :return: (net, layers) -- the final output tensor and every layer output
    :raises NotImplementedError: for any Torch module type not handled below
    """
    layers = []
    print_layers = []  # module indices to dump for debugging (empty by default)
    t7 = torchfile.load(t7_file,force_8bytes_long=True)
    with graph.as_default():
        with tf.name_scope(t7_file):
            for idx, module in enumerate(t7.modules):
                if idx in print_layers:
                    print(module)
                if module._typename == b'nn.SpatialReflectionPadding':
                    left = module.pad_l
                    right = module.pad_r
                    top = module.pad_t
                    bottom = module.pad_b
                    # NHWC layout: pad only height/width dims.
                    net = tf.pad(net, [[0,0], [top, bottom], [left, right], [0,0]], 'REFLECT')
                    layers.append(net)
                elif module._typename == b'nn.SpatialConvolution':
                    # Torch stores weights (out, in, kH, kW); TF wants (kH, kW, in, out).
                    weight = tf.Variable(module.weight.transpose([2,3,1,0]),dtype=tf.float32)
                    bias = module.bias
                    strides = [1, module.dH, module.dW, 1]
                    net = tf.nn.conv2d(net, weight, strides, padding='VALID')
                    net = tf.nn.bias_add(net, bias)
                    layers.append(net)
                elif module._typename == b'nn.ReLU':
                    net = tf.nn.relu(net)
                    layers.append(net)
                elif module._typename == b'nn.SpatialUpSamplingNearest':
                    # Scale spatial dims dynamically from the runtime shape.
                    d = tf.shape(net)
                    size = [d[1] * module.scale_factor, d[2] * module.scale_factor]
                    net = tf.image.resize_nearest_neighbor(net, size)
                    layers.append(net)
                elif module._typename == b'nn.SpatialMaxPooling':
                    net = tf.nn.max_pool(net, ksize=[1, module.kH, module.kW, 1], strides=[1, module.dH, module.dW, 1], padding='VALID', name = str(module.name, 'utf-8'))
                    layers.append(net)
                else:
                    raise NotImplementedError(module._typename)
    return net, layers
Loads a Torch network from a saved .t7 file into Tensorflow. :param net Input to Torch network :param graph Tensorflow graph that the network should be created as part of :param t7 Path to t7 file to use
625941c1bde94217f3682d69
def __init__(self, lexer):
    """Init a token list by draining `lexer` until it is exhausted."""
    self._tokens = []
    token = lexer.next_token()
    while token is not None:
        self._tokens.append(token)
        token = lexer.next_token()
    # Tag this token list with a fresh unique identifier.
    self.new_id = unique_id()
init a token list from a lexer
625941c157b8e32f5248340f
def test_coords_have_obj(self):
    """An object stored in a cell is reported only at that cell's coords."""
    arena = Arena(1, 2)
    marker = object()
    # Place the marker in cell (0, 1) and leave (0, 0) empty.
    arena.data = [[], [marker]]
    assert arena.coords_have_obj((0, 1), marker) == True
    assert arena.coords_have_obj((0, 0), marker) == False
Coords contain a specified object
625941c145492302aab5e237
def set(request):
    """Updates the information stored in the user's profile.

    NOTE(review): shadows the builtin `set`; also saves without calling
    profile_form.is_valid() -- confirm invalid POSTs are acceptable here.
    """
    profile = retrieve(request)
    # Bind POST data and uploaded files to the existing profile instance.
    profile_form = UserProfileForm(request.POST, request.FILES, instance=profile)
    profile_form.save()
updates the information stored in the user's profile
625941c1596a897236089a39
def __init__(self, value, parent):
    """Node constructor.

    Parameters
    ----------
    value : int
        Integer value assigned to the node.
    parent : Node
        Parent of the node; None for the root.
    """
    self.value = value
    self.parent = parent
    # Children start empty; they are attached as the tree grows.
    self.left = None
    self.right = None
Node constructor Parameters ---------- value : int integer value assigned to a node parent : Node Parent of a node, for root this will be None
625941c19c8ee82313fbb6ea
def auto_correlation(series, ar_lags):
    """Correlate a series with itself at different lag shifts.

    :param series: the series to correlate with itself
    :param ar_lags: array of lags to shift the series by
    :return: normalized correlation coefficients, one per lag in ar_lags
    """
    # Autocorrelation is just cross-correlation of the series with itself.
    return correlation(series, series, ar_lags)
Routine to take the correlation of a series with itself at different lag shifts Input: series: the series that will be correlated with itself ar_lags: The array of lags that series will be shifted by Output: corr: The series of normalized correlation coefficients, each index corresponding to the lag found at the same index from ar_lags
625941c1be383301e01b5400
def dp_n3():
    """Minimum Cost Tree From Leaf Values, interval DP over the global `arr`.

    dfs(lo, hi) is the minimum sum of non-leaf node values for a tree
    whose leaves are arr[lo..hi] (inclusive).  O(n^2) states, O(n) work
    per state -> O(n^3) time, O(n^2) space.
    """
    @cache
    def dfs(lo, hi):
        # Removed leftover debug `print(l, r)` -- it spammed stdout on
        # every (memoized) state and dominated the runtime.
        if lo >= hi:
            return 0
        best = float('inf')
        for k in range(lo, hi):
            # Root value when splitting leaves at k: product of the two
            # subtrees' maximum leaves.
            rootval = max(arr[lo:k + 1]) * max(arr[k + 1:hi + 1])
            best = min(best, dfs(lo, k) + dfs(k + 1, hi) + rootval)
        return best

    return dfs(0, len(arr) - 1)
Runtime: 327 ms, faster than 6.92% of Python3 online submissions for Minimum Cost Tree From Leaf Values. lee215: what's the complexity? N^2 states and O(N) to find each. So this solution is O(N^3) time and O(N^2) space. REF: https://leetcode.com/problems/minimum-cost-tree-from-leaf-values/discuss/478708/RZ-Summary-of-all-the-solutions-I-have-learned-from-Discuss-in-Python https://leetcode.com/problems/minimum-cost-tree-from-leaf-values/discuss/474188/I-think-I-able-to-explain-it-to-myself-and-to-you...(Java-DP)-.-Complexity-is-in-the-question
625941c191af0d3eaac9b98c
def maxProfit(self, prices):
    """
    :type prices: List[int]
    :rtype: int
    """
    # Greedy: harvest every upward price movement between adjacent days.
    total = 0
    for prev, cur in zip(prices, prices[1:]):
        gain = cur - prev
        if gain > 0:
            total += gain
    return total
:type prices: List[int] :rtype: int
625941c131939e2706e4cde3
def midPoint(df):
    """Finds the midpoint of a dataframe of line segments.

    Parameters
    ----------
    df: pandas.DataFrame
        Dataframe of line segments; must contain
        ["Xstart", "Ystart", "Xend", "Yend"].

    Returns
    -------
    df: pandas.DataFrame
        The same dataframe with new columns ['Xmid', 'Ymid'].
    """
    # The midpoint of each axis is the mean of its two endpoints.
    df['Xmid'] = df[['Xstart', 'Xend']].mean(axis=1)
    df['Ymid'] = df[['Ystart', 'Yend']].mean(axis=1)
    return df
Finds the midpoint of a dataframe of line segments. Parameters ---------- df: pandas.Dataframe dataframe of the line segments must contain ["Xstart", "Ystart", "Xend", "Yend"] Returns ------- df: pandas.Dataframe with new columns of ['Xmid', 'Ymid']
625941c1d7e4931a7ee9de93
def caseExists(self, case_name):
    """@rtype: bool -- True when `case_name` is among the known cases."""
    known_cases = self.getCaseList()
    return case_name in known_cases
@rtype: bool
625941c176d4e153a657eaa6
def fills_with(self, other):
    r"""Return whether self \cup other fills.

    Not implemented yet.
    """
    assert isinstance(other, IntegralLamination)
    # Bug fix: the old `return NotImplemented` handed callers a *truthy*
    # sentinel, so `if x.fills_with(y):` silently took the True branch.
    # Raise instead so unimplemented use fails loudly.
    raise NotImplementedError('fills_with() is not yet implemented')
Return whether self \cup other fills.
625941c173bcbd0ca4b2bfec
def updateFromPipeline(self, pipeline):
    """updateFromPipeline(pipeline: Pipeline) -> None
    Read the list of aliases and parameters from the pipeline and rebuild
    the tree widget: one 'Aliases' root for aliased parameters, then one
    item per module that has functions with parameters.

    NOTE(review): Python 2 code (iteritems/has_key/xrange, Qt QStringList).
    """
    self.clear()
    if not pipeline:
        return
    # --- Aliased parameters, grouped under a single 'Aliases' root ---
    if len(pipeline.aliases)>0:
        aliasRoot = QParameterTreeWidgetItem(None, self, QtCore.QStringList('Aliases'))
        aliasRoot.setFlags(QtCore.Qt.ItemIsEnabled, )
        for (alias, info) in pipeline.aliases.iteritems():
            ptype, pId, parentType, parentId, _ = info
            parameter = pipeline.db_get_object(ptype, pId)
            v = parameter.strValue
            aType = parameter.type
            aIdentifier = parameter.identifier
            aNamespace = parameter.namespace
            label = QtCore.QStringList('%s = %s' % (alias, v))
            pInfo = ParameterInfo(type=aType,
                                  identifier=aIdentifier,
                                  namespace=aNamespace,
                                  value=v,
                                  id=pId,
                                  dbtype=ptype,
                                  parent_dbtype=parentType,
                                  parent_id=parentId,
                                  is_alias=True)
            aliasItem = QParameterTreeWidgetItem((alias, [pInfo]), aliasRoot, label)
        aliasRoot.setExpanded(True)
    # --- Per-module parameters, sorted by module name ---
    inspector = PipelineInspector()
    inspector.inspect_ambiguous_modules(pipeline)
    sortedModules = sorted(pipeline.modules.iteritems(), key=lambda item: item[1].name)
    for mId, module in sortedModules:
        if len(module.functions)>0:
            mLabel = QtCore.QStringList(module.name)
            moduleItem = None
            for fId in xrange(len(module.functions)):
                function = module.functions[fId]
                if len(function.params)==0: continue
                if moduleItem==None:
                    # Lazily create the module item on the first function
                    # that actually has parameters.
                    if inspector.annotated_modules.has_key(mId):
                        annotatedId = inspector.annotated_modules[mId]
                        moduleItem = QParameterTreeWidgetItem(annotatedId, self, mLabel)
                    else:
                        moduleItem = QParameterTreeWidgetItem(None, self, mLabel)
                v = ', '.join([p.strValue for p in function.params])
                label = QtCore.QStringList('%s(%s)' % (function.name, v))
                pList = [ParameterInfo(type=function.params[pId].type,
                                       identifier=function.params[pId].identifier,
                                       namespace=function.params[pId].namespace,
                                       value=function.params[pId].strValue,
                                       id=function.params[pId].real_id,
                                       dbtype=ModuleParam.vtType,
                                       parent_dbtype=function.vtType,
                                       parent_id=function.real_id,
                                       is_alias=False)
                         for pId in xrange(len(function.params))]
                mName = module.name
                if moduleItem.parameter!=None:
                    # Ambiguous module: disambiguate its name with the id.
                    mName += '(%d)' % moduleItem.parameter
                fName = '%s :: %s' % (mName, function.name)
                mItem = QParameterTreeWidgetItem((fName, pList), moduleItem, label)
            if moduleItem:
                moduleItem.setExpanded(True)
updateFromPipeline(pipeline: Pipeline) -> None Read the list of aliases and parameters from the pipeline
625941c18e7ae83300e4af42
def strip_comments(str_in):
    """Remove single line comments from a multiline string.

    :param str_in: Input string
    :type str_in: `str`
    :rtype: `str`
    """
    result = str_in
    match = re.search(RE_COMMENT, result)
    while match is not None:
        start, end = match.span()
        # Drop the comment plus the single character that follows it.
        result = result[:start] + result[end + 1:]
        match = re.search(RE_COMMENT, result)
    return result
Remove single line comments from a multiline string. :param str_in: Input string :type str_in: `str` :rtype: `str`
625941c13317a56b86939bd4
def least_cost_path(G, start, dest, cost):
    """Dijkstra's algorithm: least cost path in digraph G from start to dest.

    `cost` is a function taking a single edge (u, v) and returning a
    real-valued cost.  Returns the path as a list of vertices, or None
    when dest is unreachable.  The path from start to start is [start].
    """
    todo = pqueue.PQueue()  # frontier, keyed by best known cost so far
    todo.update(start, 0);
    visited = set()         # vertices whose distance is final (settled)
    parent = {}             # back-pointers for path reconstruction
    while todo and (dest not in visited):
        (cur, c) = todo.pop_smallest()
        visited.add(cur)
        for n in G.adj_to(cur):
            if n in visited: continue
            # update() returns True when it lowered n's priority.
            if todo.update(n, c+cost((cur,n))):
                parent[n] = cur
    if dest not in visited:
        return None
    # Walk the parent chain back from dest, then reverse it.
    path = [dest]
    cur = dest
    while start not in path:
        cur = parent[cur]
        path.append(cur)
    path.reverse()
    return path
path = least_cost_path(G, start, dest, cost) least_cost_path returns a least cost path in the digraph G from vertex start to vertex dest, where costs are defined by the cost function. cost should be a function that takes a single edge argument and returns a real-valued cost. if there is no path, then returns None the path from start to start is [start] >>> g = digraph.Digraph( [(1, 2), (4, 2), (4, 6), (6, 7), (1, 7), (2, 4)] ) >>> least_cost_path(g, 1, 7, lambda e: abs(2*e[0]-e[1])) [1, 7] >>> least_cost_path(g, 7, 2, lambda e: 1) is None True
625941c16aa9bd52df036d19
def check(self):
    """See :http:post:`/api/items/(id)/check`.

    After feature and permission checks, marks this item as checked,
    persists it, and publishes an 'item-check' event on the list's
    activity feed.
    """
    _check_feature(self.app.user, 'check', self)
    self._check_permission(self.app.user, 'item-modify')
    self.checked = True
    # Persist the updated item under its id.
    self.app.r.oset(self.id, self)
    self.list.activity.publish(Event.create('item-check', self, app=self.app))
See :http:post:`/api/items/(id)/check`.
625941c1a8ecb033257d3044
def playprevious(*args):
    """playprevious() -- Play previous item in playlist.

    Stub: accepts any arguments and does nothing.
    """
    return None
playprevious() -- Play previous item in playlist.
625941c126238365f5f0ede2
def test_detail(self):
    """Check that the detail page returns HTTP 200 for every Post fixture."""
    c = Client()
    for p in Post.fixtures():
        # Resolve each fixture's detail URL from its slug.
        url = reverse('blog:detail', kwargs={'slug': p['slug']})
        response = c.get(url)
        self.assertEqual(response.status_code, 200)
check detail page for Post
625941c1dc8b845886cb54aa
def resolve(self, cypherQuery, output='text/plain'):
    """Cypher query resolver (GET /cypher/resolve).

    :param cypherQuery: the cypher query to resolve
    :param output: requested response content type (text/plain)
    :return: the response body, or None when it is empty/falsy
    """
    kwargs = {'cypherQuery': cypherQuery}
    # JSON-encode any dict-valued arguments for transport.
    kwargs = {k:dumps(v) if builtins.type(v) is dict else v for k, v in kwargs.items()}
    param_rest = self._make_rest(None, **kwargs)
    url = self._basePath + ('/cypher/resolve').format(**kwargs)
    requests_params = kwargs
    # NOTE(review): `output` is rebound here from content type to the
    # response body -- confusing but intentional in this generated client.
    output = self._get('GET', url, requests_params, output)
    return output if output else None
Cypher query resolver from: /cypher/resolve Arguments: cypherQuery: The cypher query to resolve outputs: text/plain
625941c129b78933be1e5626
def deleteBobject(self, id, headers=None, query_params=None, content_type="application/json"):
    """Delete bobject.

    It is method for DELETE /bobject/{id}
    """
    endpoint = self.client.base_url + "/bobject/" + id
    return self.client.delete(endpoint, None, headers, query_params, content_type)
Delete bobject It is method for DELETE /bobject/{id}
625941c1c4546d3d9de729a8
@app.route('/sanitize/<ctab>', name="sanitize")
def sanitize(ctab):
    """This method is not implemented yet.

    Placeholder handler for the /sanitize/<ctab> route; returns None
    (an empty response).
    """
    pass
This method is not implemented yet.
625941c1d4950a0f3b08c2c7
def parse_input(_input: List[str]) -> Dict[int, int]:
    """Parse the first input line (comma-separated ages) and return a
    dictionary mapping each lanternfish age to its count."""
    ages = (int(token) for token in _input[0].split(","))
    return count_occurrences(ages)
Open and read file at filepath, return dictionary holding the number of lanternfish of a certain age.
625941c18c0ade5d55d3e92f
def sorted_refer_to(l, referer, reverse=False, **kwargs):
    """Sort `l` using the parallel list `referer` as the sort key.

    Keyword mode selects the return value:
      "only-list"    -> the sorted list only
      "only-referer" -> the sorted referer only
      anything else  -> {"list": sorted_l, "referer": sorted_r}

    >>> sorted_refer_to(["a","b","c"], [7,8,6])
    {'list': ['c', 'a', 'b'], 'referer': [6, 7, 8]}
    """
    mode = kwargs.get("mode", "both")
    # Pair each element with its referer and sort by the referer value;
    # sorted() is stable, matching the original tuple-list sort.
    pairs = sorted(zip(l, referer), key=itemgetter(1), reverse=reverse)
    sorted_l = [ele for ele, _ in pairs]
    sorted_r = [ref for _, ref in pairs]
    if mode == "only-list":
        return sorted_l
    elif mode == "only-referer":
        # Bug fix: previously returned the *unsorted* input `referer`.
        return sorted_r
    else:
        return {"list": sorted_l, "referer": sorted_r}
>>> from elist.elist import * >>> l = ["a","b","c"] >>> referer = [7,8,6] >>> sorted_refer_to(l,referer) {'list': ['c', 'a', 'b'], 'referer': [6, 7, 8]}
625941c115baa723493c3eea
def reverseVowels(self, s):
    """
    :type s: str
    :rtype: str

    Reverse only the vowels of s (case-insensitive match, original case kept).
    """
    vowels = "aeiou"
    positions = [i for i, ch in enumerate(s) if ch.lower() in vowels]
    chars = list(s)
    # Swap vowels pairwise from both ends.  Bug fix: the original used
    # `len(poses)/2`, which is a float on Python 3 and makes range() raise
    # TypeError; integer division is required.
    for k in range(len(positions) // 2):
        i, j = positions[k], positions[-k - 1]
        chars[i], chars[j] = chars[j], chars[i]
    return ''.join(chars)
:type s: str :rtype: str
625941c15f7d997b87174a0c
def ensureenabled(ui):
    """Make sure the eol extension is enabled when used as a hook.

    When eol is used through hooks, the extension is never formally loaded
    and enabled, so e.g. its config declarations are never registered.
    This function enables it on demand.
    """
    if 'eol' in ui._knownconfig:
        # Already loaded -- its config declarations are registered.
        return
    ui.setconfig('extensions', 'eol', '', source='internal')
    extensions.loadall(ui, ['eol'])
make sure the extension is enabled when used as hook When eol is used through hooks, the extension is never formally loaded and enabled. This has some side effect, for example the config declaration is never loaded. This function ensure the extension is enabled when running hooks.
625941c1ad47b63b2c509ef6
@lru_cache(maxsize=512)
def extract_feedback(message: str) -> Optional[FeedbackType]:
    """Extracts a single feedback out of message.

    :param message: a message from which a feedback is being extracted
    :return: a number from FEEDBACK_RANGE, or None when nothing matched
    """
    words: Tuple[str, ...] = tuple(
        take_away_punctuation(normalize_string(message)).split())
    # Intensifier prefixes and the score adjustment they apply to the
    # keyword that follows (within one word of distance).
    pre_words_prefixes = {
        'очень': 1,
        'ужасно': 2,
        'не': -2,
        'невыносим': 0
    }
    # Temperature keyword prefixes and their base scores.
    keywords_prefixes = {
        'холод': -3, 'мороз': -3, 'прохлад': -2, 'зябк': -2,
        'неприятн': -1,
        'нормаль': 0, 'ок': 0, 'хорош': 0, 'отличн': 0, 'замечат': 0,
        'превосход': 0, 'прекрасн': 0, 'тепл': 0,
        'вспоте': 2,
        'жар': 3, 'горяч': 3, 'зажар': 3, 'запар': 3
    }

    def match_prefix(word: str, prefixes) -> Optional[str]:
        # Return the dictionary key that `word` starts with, if any.
        for prefix in prefixes:
            if word.startswith(prefix):
                return prefix
        return None

    pre_word_affect = 0
    pre_word_dist = -1
    for word in words:
        if pre_word_dist >= 0:
            pre_word_dist += 1
        if pre_word_dist == 2:
            # The intensifier only applies to the next word; expire it.
            pre_word_affect = 0
            pre_word_dist = 0
        pre_word_prefix = match_prefix(word, pre_words_prefixes)
        keyword_prefix = match_prefix(word, keywords_prefixes)
        if pre_word_prefix:
            # Bug fix: index by the matched *prefix*, not the full word --
            # the full word (e.g. an inflected form) is usually not a key
            # and raised KeyError.
            pre_word_affect = pre_words_prefixes[pre_word_prefix]
            pre_word_dist = 0
        elif keyword_prefix:
            # Bug fix: the old helper returned the full word instead of
            # the prefix, so this lookup raised KeyError for any
            # non-exact keyword match.
            return keywords_prefixes[keyword_prefix] + pre_word_affect
    # Fall back to an explicit numeric feedback in the message.
    feedbacks = ('-5', '-4', '-3', '-2', '-1', '0', '1', '2', '3', '4', '5')
    for word in words:
        if word in feedbacks:
            return int(word)
    return None
Extracts a single feedback out of message :param message: a message from which a feedback is being extracted :return: a number from FEEDBACK_RANGE
625941c10fa83653e4656f33
@hub.pretrained(
    "https://data.megengine.org.cn/models/weights/"
    "deeplabv3plus_res101_voc_512size_79dot5_7856dc84.pkl"
)
def deeplabv3plus_res101_voc_512size(**kwargs):
    """DeepLab v3+ model from
    `"Encoder-Decoder with Atrous Separable Convolution for Semantic Image
    Segmentation" <https://arxiv.org/abs/1802.02611>`_
    (ResNet-101 backbone, Pascal VOC, 512px input).
    """
    cfg = VOCConfig()
    # Full weights come from the pretrained checkpoint above; skip the
    # separate backbone-weight download.
    cfg.backbone_pretrained = False
    return models.DeepLabV3Plus(cfg, **kwargs)
DeepLab v3+ model from `"Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation" <https://arxiv.org/abs/1802.02611>`_
625941c1ad47b63b2c509ef7
def is_running(self):
    """Check if time is less than end time (steps still remaining)."""
    remaining = self.num_steps - self.this_step
    return remaining > 0
Check if time is less than end time
625941c13539df3088e2e2c2
def import_video(
    filepath,
    landmark_resolver=same_name_video,
    normalize=None,
    normalise=None,
    importer_method="ffmpeg",
    exact_frame_count=True,
):
    """Single video (and associated landmarks) importer.

    Returns a LazyList wrapping the frames of the video at `filepath`;
    landmark files are resolved via `landmark_resolver`.  Only the
    'ffmpeg' importer method is currently supported.
    """
    # `normalise` is the deprecated spelling; fold it into `normalize`.
    normalize = _parse_deprecated_normalise(normalise, normalize)
    kwargs = {"normalize": normalize, "exact_frame_count": exact_frame_count}
    video_importer_methods = {"ffmpeg": ffmpeg_video_types}
    if importer_method not in video_importer_methods:
        raise ValueError(
            "Unsupported importer method requested. Valid values "
            "are: {}".format(video_importer_methods.keys())
        )
    return _import(
        filepath,
        video_importer_methods[importer_method],
        landmark_ext_map=image_landmark_types,
        landmark_resolver=landmark_resolver,
        landmark_attach_func=_import_lazylist_attach_landmarks,
        importer_kwargs=kwargs,
    )
Single video (and associated landmarks) importer. If a video file is found at `filepath`, returns an :map:`LazyList` wrapping all the frames of the video. By default, landmark files sharing the same filename stem will be imported and attached with a group name based on the extension of the landmark file appended with the frame number, although this behavior can be customised (see `landmark_resolver`). .. warning:: This method currently uses ffmpeg to perform the importing. In order to recover accurate frame counts from videos it is necessary to use ffprobe to count the frames. This involves reading the entire video in to memory which may cause a delay in loading despite the lazy nature of the video loading within Menpo. If ffprobe cannot be found, and `exact_frame_count` is ``False``, Menpo falls back to ffmpeg itself which is not accurate and the user should proceed at their own risk. Parameters ---------- filepath : `pathlib.Path` or `str` A relative or absolute filepath to a video file. landmark_resolver : `function` or `None`, optional This function will be used to find landmarks for the video. The function should take two arguments (the path to the video and the frame number) and return a dictionary of the form ``{'group_name': 'landmark_filepath'}`` Default finds landmarks with the same name as the video file, appended with '_{frame_number}'. If ``None``, landmark importing will be skipped. normalize : `bool`, optional If ``True``, normalize the frame pixels between 0 and 1 and convert to floating point. If ``False``, the native datatype of the image will be maintained (commonly `uint8`). Note that in general Menpo assumes :map:`Image` instances contain floating point data - if you disable this flag you will have to manually convert the farmes you import to floating point before doing most Menpo operations. This however can be useful to save on memory usage if you only wish to view or crop the frames. 
normalise : `bool`, optional Deprecated version of normalize. Please use the normalize arg. importer_method : {'ffmpeg'}, optional A string representing the type of importer to use, by default ffmpeg is used. exact_frame_count: `bool`, optional If ``True``, the import fails if ffprobe is not available (reading from ffmpeg's output returns inexact frame count) Returns ------- frames : :map:`LazyList` An lazy list of :map:`Image` or subclass thereof which wraps the frames of the video. This list can be treated as a normal list, but the frame is only read when the video is indexed or iterated. Examples -------- >>> video = menpo.io.import_video('video.avi') >>> # Lazily load the 100th frame without reading the entire video >>> frame100 = video[100]
625941c1a934411ee375160a
def test_afterhours_start(self):
    """Tests that RealtimeClock returns immediately if started after RTH."""
    # Patch wall-clock reads and sleeps so the test drives time itself.
    with patch('zipline.gens.realtimeclock.pd.to_datetime') as to_dt, patch('zipline.gens.realtimeclock.sleep') as sleep:
        rtc = RealtimeClock(
            self.sessions,
            self.opens,
            self.closes,
            days_at_time(self.sessions, time(8, 45), "US/Eastern"),
            False
        )
        to_dt.side_effect = self.get_clock
        sleep.side_effect = self.advance_clock
        # Start after the session close (20:05 UTC).
        self.internal_clock = pd.Timestamp("2017-04-20 20:05", tz='UTC')
        events = list(rtc)
        # Only session-start and before-trading-start fire; no bar events.
        self.assertEquals(len(events), 2)
        _, event_type = events[0]
        self.assertEquals(event_type, SESSION_START)
        event_time, event_type = events[1]
        self.assertEquals(event_time, pd.Timestamp("2017-04-20 20:05", tz='UTC'))
        self.assertEquals(event_type, BEFORE_TRADING_START_BAR)
Tests that RealtimeClock returns immediately if started after RTH
625941c1a05bb46b383ec79a
def main(argv):
    """Main entry point.

    Args:
        argv (list): list of strings giving command line arguments; the
            full absolute path to the file to wrap is mandatory.

    Returns:
        int: 0 on success.

    Raises:
        ValueError: when the target file does not exist.
    """
    # NOTE(review): reversing then taking [0] selects the *last* argument
    # (and mutates the caller's list) -- confirm this is intentional.
    argv.reverse()
    to_wrap = argv[0]
    if not os.path.exists(to_wrap):
        raise ValueError("%s : Does not exist." % to_wrap)
    with open(to_wrap, "r+", encoding="UTF-8") as f:
        # Rewrite in place: pre-append lines first, then the old content.
        existing = f.read();
        f.seek(0);
        for line in lines_to_pre_append():
            f.write(line)
        f.write(existing)
    return 0
Main entry point Args: argv (list): List of strings giving command line arguments The full absolute path to the file to wrap is mandatory.
625941c1d53ae8145f87a1ea
def __getitem__(self, index):
    """The main entry point for a PyTorch Dataset.

    Arguments:
        index {int} -- row index into the target dataframe.

    Returns:
        dict -- {'surname': vectorized surname, 'nation': label index}.
    """
    row = self._target_df.iloc[index]
    surname_vector = self._vectorizer.vectorize(row.surname)
    nation_index = self._vectorizer.nation_vocab._token_to_idx[row.nationality]
    return {
        'surname': surname_vector,
        'nation': nation_index
    }
the main entry point for Pytorch Dataset. Return the data and its label based on the index Arguments: index {[int]} -- [the index to the data] Returns: [dictionary] -- [the dictionary includes data and corresponding label]
625941c17b25080760e393d1
def _construct_url_4(self, root_class, sec_class, parent_class, obj_class, child_includes):
    """Used by get_url when the object is the third-level class.

    Builds (path, filter_string) for an object nested four levels deep:
    root/sec/parent/obj.  For non-query states the full DN path is used;
    queries address the object class directly.
    """
    # Resolve each level's module parameter plus its ACI class and MO
    # rn-format string from the shared URL_MAPPING table.
    root = self.module.params.get(root_class)
    root_dict = URL_MAPPING[root_class]
    root_class = root_dict['aci_class']
    root_mo = root_dict['mo']
    sec = self.module.params.get(sec_class)
    sec_dict = URL_MAPPING[sec_class]
    sec_class = sec_dict['aci_class']
    sec_mo = sec_dict['mo']
    parent = self.module.params.get(parent_class)
    parent_dict = URL_MAPPING[parent_class]
    parent_class = parent_dict['aci_class']
    parent_mo = parent_dict['mo']
    obj = self.module.params.get(obj_class)
    obj_dict = URL_MAPPING[obj_class]
    obj_class = obj_dict['aci_class']
    obj_mo = obj_dict['mo']
    if self.module.params['state'] != 'query':
        # Address the specific MO by its full distinguished name.
        path = 'api/mo/uni/{}[{}]/{}[{}]/{}[{}]/{}[{}].json'.format(root_mo, root, sec_mo, sec, parent_mo, parent, obj_mo, obj)
        filter_string = '?rsp-prop-include=config-only' + child_includes
    else:
        # Query every object of the class.
        path = 'api/class/{}.json'.format(obj_class)
        filter_string = child_includes
    return path, filter_string
This method is used by get_url when the object is the third-level class.
625941c1090684286d50ec5a
def saveTiffMultipage(arr, fn, rescaleTo8bit=False, rgbOrder="rgba", **params): <NEW_LINE> <INDENT> if arr.ndim == 4: <NEW_LINE> <INDENT> if arr.shape[1] not in (1,2,3,4): <NEW_LINE> <INDENT> raise ValueError("can save 4d arrays (color) only with second dim of len 1..4 (RG[B[A]])") <NEW_LINE> <DEDENT> <DEDENT> elif arr.ndim != 3: <NEW_LINE> <INDENT> raise ValueError("can only save 3d (grey) or 4d (color) arrays") <NEW_LINE> <DEDENT> fp = open(fn, 'w+b') <NEW_LINE> ifd_offsets=[] <NEW_LINE> if rescaleTo8bit: <NEW_LINE> <INDENT> mi,ma = float(arr.min()), float(arr.max()) <NEW_LINE> ra = ma-mi <NEW_LINE> <DEDENT> params["_debug_multipage"] = True <NEW_LINE> for z in range(arr.shape[0]): <NEW_LINE> <INDENT> if rescaleTo8bit: <NEW_LINE> <INDENT> a=(arr[z]-mi)*255./ra <NEW_LINE> ii = array2image(a.astype(N.uint8), rgbOrder=rgbOrder) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ii = array2image(arr[z], rgbOrder=rgbOrder) <NEW_LINE> <DEDENT> fp.seek(0,2) <NEW_LINE> if z==0: <NEW_LINE> <INDENT> ifdOffset = 8 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ifdOffset = fp.tell() <NEW_LINE> <DEDENT> ii.save(fp, format="TIFF", **params) <NEW_LINE> if z>0: <NEW_LINE> <INDENT> ifdo = ifd_offsets[-1] <NEW_LINE> fp.seek(ifdo) <NEW_LINE> ifdLength = ii._debug_multipage.i16(fp.read(2)) <NEW_LINE> fp.seek(ifdLength*12,1) <NEW_LINE> fp.write(ii._debug_multipage.o32( ifdOffset )) <NEW_LINE> <DEDENT> ifd_offsets.append(ifdOffset) <NEW_LINE> <DEDENT> fp.close()
extension to PIL save TIFF **params is directly forwarded to PIL save function
625941c192d797404e304100
def get_step(self): <NEW_LINE> <INDENT> direction = choice([1, -1]) <NEW_LINE> distance = choice([0, 1, 2, 3, 4]) <NEW_LINE> return direction * distance
Decide which direction to go and how far to fo in that direction
625941c18a43f66fc4b53fde
def _cih_disable_selinux(self, log): <NEW_LINE> <INDENT> host = self.cih_host <NEW_LINE> log.cl_info("disabling SELinux on host [%s]", host.sh_hostname) <NEW_LINE> ret = host.sh_disable_selinux(log) <NEW_LINE> if ret: <NEW_LINE> <INDENT> log.cl_error("failed to disable SELinux on host [%s]", host.sh_hostname) <NEW_LINE> return -1 <NEW_LINE> <DEDENT> return 0
Disable the SELinux on the host
625941c163d6d428bbe44466
def diagnose_em(self,cool_limits=None,hot_limits=None,t_ratio_cool=[10**6.],t_ratio_hot=[10**7.],calc_slope=True,calc_ratio=True): <NEW_LINE> <INDENT> if not hasattr(self,'em_binned'): <NEW_LINE> <INDENT> raise AttributeError("EM histograms not yet binned. Run self.calc_stats() before calculating EM diagnostics.") <NEW_LINE> <DEDENT> if len(t_ratio_cool) != len(t_ratio_hot): <NEW_LINE> <INDENT> raise ValueError("Temperature ratio lists must have same length.") <NEW_LINE> <DEDENT> self.em_ratio_tpairs=[[tc,th] for tc,th in zip(t_ratio_cool,t_ratio_hot)] <NEW_LINE> self.diagnostics=[] <NEW_LINE> for upper in self.em_binned: <NEW_LINE> <INDENT> tmp = [] <NEW_LINE> for lower in upper: <NEW_LINE> <INDENT> if calc_slope: <NEW_LINE> <INDENT> t_cool,em_cool,t_hot,em_hot = self._split_branch(lower['bin_centers'],lower['hist']) <NEW_LINE> cool_lims,hot_lims = cool_limits,hot_limits <NEW_LINE> cool_lims = self._find_fit_limits(t_cool,em_cool,cool_lims,temp_opt='cool') <NEW_LINE> hot_lims = self._find_fit_limits(t_hot,em_hot,hot_lims) <NEW_LINE> dc = self._fit_em_branch(t_cool,em_cool,cool_lims) <NEW_LINE> dh = self._fit_em_branch(t_hot,em_hot,hot_lims) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dc,dh=None,None <NEW_LINE> <DEDENT> if calc_ratio: <NEW_LINE> <INDENT> tmp_ratio=[self._calc_em_ratio(tp[0],tp[1],lower['bin_centers'],lower['hist']) for tp in self.em_ratio_tpairs] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> tmp_ratio=[None for _ in self.em_ratio_tpairs] <NEW_LINE> <DEDENT> tmp.append({'cool':dc,'hot':dh,'ratio':tmp_ratio}) <NEW_LINE> <DEDENT> self.diagnostics.append(tmp)
Fit binned emission measure histograms on hot and cool sides
625941c1ff9c53063f47c16b
def list(self, request): <NEW_LINE> <INDENT> comments = Comments.objects.all() <NEW_LINE> post_id = request.query_params.get('post_id', None) <NEW_LINE> if post_id is not None: <NEW_LINE> <INDENT> comments = comments.filter(post_id = post_id) <NEW_LINE> <DEDENT> serializer = CommentSerializer( comments, many=True, context={'request': request}) <NEW_LINE> return Response(serializer.data)
Handle GET requests to comments resource Returns: Response -- JSON serialized list of comments
625941c1a79ad161976cc0bc
def setup(self, client=None): <NEW_LINE> <INDENT> if client is None: <NEW_LINE> <INDENT> address = self.host, self.port <NEW_LINE> self.socket = socket.socket() <NEW_LINE> self.socket.connect(address) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.socket = client <NEW_LINE> <DEDENT> return Connection.setup(self)
Connect to the given address. If a host connection object is not already listening on this address, this will not work.
625941c1956e5f7376d70de5
def create(self): <NEW_LINE> <INDENT> self._require('name', 'instance') <NEW_LINE> self._pretty_print(self.dbaas.backups.create, self.name, self.instance, self.description)
Create a new backup
625941c12ae34c7f2600d0a9
def setPreferredDescription(self, preferredDescription): <NEW_LINE> <INDENT> return _DataModel.OriginUncertainty_setPreferredDescription(self, preferredDescription)
setPreferredDescription(OriginUncertainty self, Seiscomp::Core::Optional< Seiscomp::DataModel::OriginUncertaintyDescription >::Impl const & preferredDescription)
625941c1ab23a570cc2500f8
def create_state_variables(self, scpd_xml): <NEW_LINE> <INDENT> state_vars = [] <NEW_LINE> for state_var_xml in scpd_xml.findall('.//service:stateVariable', NS): <NEW_LINE> <INDENT> state_var = self.create_state_variable(state_var_xml) <NEW_LINE> state_vars.append(state_var) <NEW_LINE> <DEDENT> return state_vars
Create UpnpStateVariables from scpd_xml.
625941c14d74a7450ccd413a
def initiate_election(s): <NEW_LINE> <INDENT> time.sleep(1) <NEW_LINE> s.send(my_id.encode('utf-8')) <NEW_LINE> print("token sent: " + my_id) <NEW_LINE> print("Election initiated")
""putting the current thred to sleep in order to provide delay in the algorithm
625941c132920d7e50b28145
def move(self, time, number_of_level): <NEW_LINE> <INDENT> self.x += self.vx <NEW_LINE> if self.x >= SCREEN_X - self.r: <NEW_LINE> <INDENT> self.x = SCREEN_X - self.r <NEW_LINE> self.vx = -1 * self.vx <NEW_LINE> self.x += self.vx <NEW_LINE> <DEDENT> if self.x <= self.r: <NEW_LINE> <INDENT> self.x = self.r <NEW_LINE> self.vx = -1 * self.vx <NEW_LINE> self.x += self.vx <NEW_LINE> <DEDENT> if int(time) % 1000 < 10: <NEW_LINE> <INDENT> self.vx = randint(-(number_of_level - NUMBER_OF_LEVEL_WHEN_TARGETS_STARTS_MOVING), (number_of_level - NUMBER_OF_LEVEL_WHEN_TARGETS_STARTS_MOVING)) * (FPS / 60)
Стандартный move (см docs.txt)
625941c1cdde0d52a9e52fa8
def __init__( self, *, managed: Optional[bool] = None, enable_azure_rbac: Optional[bool] = None, admin_group_object_i_ds: Optional[List[str]] = None, client_app_id: Optional[str] = None, server_app_id: Optional[str] = None, server_app_secret: Optional[str] = None, tenant_id: Optional[str] = None, **kwargs ): <NEW_LINE> <INDENT> super(ManagedClusterAADProfile, self).__init__(**kwargs) <NEW_LINE> self.managed = managed <NEW_LINE> self.enable_azure_rbac = enable_azure_rbac <NEW_LINE> self.admin_group_object_i_ds = admin_group_object_i_ds <NEW_LINE> self.client_app_id = client_app_id <NEW_LINE> self.server_app_id = server_app_id <NEW_LINE> self.server_app_secret = server_app_secret <NEW_LINE> self.tenant_id = tenant_id
:keyword managed: Whether to enable managed AAD. :paramtype managed: bool :keyword enable_azure_rbac: Whether to enable Azure RBAC for Kubernetes authorization. :paramtype enable_azure_rbac: bool :keyword admin_group_object_i_ds: The list of AAD group object IDs that will have admin role of the cluster. :paramtype admin_group_object_i_ds: list[str] :keyword client_app_id: The client AAD application ID. :paramtype client_app_id: str :keyword server_app_id: The server AAD application ID. :paramtype server_app_id: str :keyword server_app_secret: The server AAD application secret. :paramtype server_app_secret: str :keyword tenant_id: The AAD tenant ID to use for authentication. If not specified, will use the tenant of the deployment subscription. :paramtype tenant_id: str
625941c155399d3f0558862a
def float_equal(a, b): <NEW_LINE> <INDENT> if math.fabs(a - b) < math.pow(10, -PRECISION): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> elif (a < math.pow(1, -20) and math.isnan(b)) or (b < math.pow(1, -20) and math.isnan(a)): <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False
判断两个float数是否相等,差小于1e-7即认为相等 :param a: :param b: :return:
625941c1eab8aa0e5d26dace
def handleCertificateError(self, eventId: int, action: str) -> Awaitable[Dict]: <NEW_LINE> <INDENT> return self.client.send( "Security.handleCertificateError", {"eventId": eventId, "action": action} )
Handles a certificate error that fired a certificateError event. Status: Deprecated See `https://chromedevtools.github.io/devtools-protocol/tot/Security#method-handleCertificateError` :param eventId: The ID of the event. :param action: The action to take on the certificate error. :return: The results of the command
625941c138b623060ff0ad65
def download_photo(http, url, file_path): <NEW_LINE> <INDENT> request = http.request('GET', base_url + url) <NEW_LINE> if request.status == 200: <NEW_LINE> <INDENT> image_obj = io.BytesIO(request.data) <NEW_LINE> image = Image.open(image_obj) <NEW_LINE> try: <NEW_LINE> <INDENT> image.save(file_path) <NEW_LINE> <DEDENT> except FileNotFoundError: <NEW_LINE> <INDENT> os.makedirs('data/files/danbooru') <NEW_LINE> image.save(file_path) <NEW_LINE> <DEDENT> return Image.open(file_path)
Downloads a photo and returns the file object
625941c12eb69b55b151c824
@pytest.fixture() <NEW_LINE> def testfile2(): <NEW_LINE> <INDENT> orig_path = os.path.join(RELATIVE_DIR, "testdata", "testfile2") <NEW_LINE> path = "_testfile2" <NEW_LINE> copyfile(orig_path, path) <NEW_LINE> yield path <NEW_LINE> if os.path.exists(path): <NEW_LINE> <INDENT> os.remove(path)
This fixture creates a copy of testfile2 that can be read and written to and yields a usable path to it. It also deletes the file when the test concludes.
625941c1507cdc57c6306c4d
def list_tags_for_resource(resourceArn=None): <NEW_LINE> <INDENT> pass
List tags for resource. See also: AWS API Documentation Exceptions :example: response = client.list_tags_for_resource( resourceArn='string' ) :type resourceArn: string :param resourceArn: [REQUIRED] Resource arn used to list tags. :rtype: dict ReturnsResponse Syntax{ 'tags': { 'string': 'string' } } Response Structure (dict) --Response for list tags. tags (dict) --Tags result for response. (string) -- (string) -- Exceptions Amplify.Client.exceptions.InternalFailureException Amplify.Client.exceptions.BadRequestException Amplify.Client.exceptions.ResourceNotFoundException :return: { 'tags': { 'string': 'string' } } :returns: Amplify.Client.exceptions.InternalFailureException Amplify.Client.exceptions.BadRequestException Amplify.Client.exceptions.ResourceNotFoundException
625941c1e64d504609d747b7
def __fade_out(self, _from, _to): <NEW_LINE> <INDENT> self.curr_vol = _from <NEW_LINE> __dest_vol = _to <NEW_LINE> while self.is_playing and (self.curr_vol > __dest_vol) and self.curr_vol > 0: <NEW_LINE> <INDENT> self.log.debug("volume: {}...".format(self.curr_vol)) <NEW_LINE> self.curr_vol = self.curr_vol - 3 <NEW_LINE> if self.curr_vol < 0: <NEW_LINE> <INDENT> self.curr_vol = 0 <NEW_LINE> <DEDENT> self.master_device.set_volume(self.curr_vol) <NEW_LINE> for slave in self.slave_devices: <NEW_LINE> <INDENT> slave.set_volume(self.curr_vol) <NEW_LINE> <DEDENT> sleep(.40)
Lautstärke langsam absenken :param _from: :param _to:
625941c1a05bb46b383ec79b
def fit(self, X, y): <NEW_LINE> <INDENT> X, y = check_X_y( X.toarray() if isinstance(X, sparse.csr.csr_matrix) else X, y ) <NEW_LINE> self.classes_, y = np.unique(y, return_inverse=True) <NEW_LINE> clf_copy = deepcopy(self.base_clf) <NEW_LINE> self.multiclass_, self.ovr_classifiers_ = self._build_ovr_classifiers( self.classes_, clf_copy ) <NEW_LINE> self.X_ = X <NEW_LINE> self.y_ = y <NEW_LINE> self.nb_transformers = {} <NEW_LINE> if not self.multiclass_: <NEW_LINE> <INDENT> self.nb_transformers[0] = NaiveBayesTransformer(y, self.classes_[-1]) <NEW_LINE> X_transformed = self.nb_transformers[0].fit_transform(X) <NEW_LINE> self.ovr_classifiers_[0].fit(X_transformed, y) <NEW_LINE> try: <NEW_LINE> <INDENT> self.ovr_classifiers_[0].coef_ = self._interpolate(self.ovr_classifiers_[0].coef_) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> warnings.warn( "the classifier you instantiated does not have an attribute `.coef_`; " "interpolation will not occur" ) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> labels_dict = self._binarize_labels(y) <NEW_LINE> if labels_dict.keys() != self.ovr_classifiers_.keys(): <NEW_LINE> <INDENT> raise Exception( "mismatch in labels during fit() and class instantiation; {} != {}".format( labels_dict.keys(), self.ovr_classifiers_.keys() ) ) <NEW_LINE> <DEDENT> for l, clf in self.ovr_classifiers_.items(): <NEW_LINE> <INDENT> self.nb_transformers[l] = NaiveBayesTransformer(y, l) <NEW_LINE> X_transformed = self.nb_transformers[l].fit_transform(X) <NEW_LINE> clf.fit(X_transformed, labels_dict[l]) <NEW_LINE> try: <NEW_LINE> <INDENT> clf.coef_ = self._interpolate(clf.coef_) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> if l == list(self.ovr_classifiers_.keys())[0]: <NEW_LINE> <INDENT> warnings.warn( "the classifier you instantiated does not have an attribute `.coef_`; " "interpolation will not occur" ) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return self
Parameters ---------- X: scipy.csr_matrix A *sparse* representation of features y: array-like of shape = [1, n_samples] The labels of the dataset Returns ------- self: object Returns self
625941c1fff4ab517eb2f3b2
def find_unique_values_list(list_input: []): <NEW_LINE> <INDENT> list_output = [] <NEW_LINE> for item in list_input: <NEW_LINE> <INDENT> if item not in list_output: <NEW_LINE> <INDENT> list_output.append(item) <NEW_LINE> <DEDENT> <DEDENT> return list_output
Find the unique values in a list :param list_input: :return:
625941c14e4d5625662d4352
def GetPointer(self): <NEW_LINE> <INDENT> return _itkImagePython.itkImageUL3_GetPointer(self)
GetPointer(self) -> itkImageUL3
625941c1925a0f43d2549dec
def _set_from_string(self, note): <NEW_LINE> <INDENT> m = NOTE_MATCHER.match(note) <NEW_LINE> if m is not None: <NEW_LINE> <INDENT> name, accidentals, octave = m.group(1), m.group(2), m.group(3) <NEW_LINE> self._base_name = name <NEW_LINE> try: <NEW_LINE> <INDENT> self._octave = int(octave) <NEW_LINE> <DEDENT> except (NameError, ValueError): <NEW_LINE> <INDENT> self._octave = 4 <NEW_LINE> <DEDENT> self.accidentals = accidentals <NEW_LINE> return <NEW_LINE> <DEDENT> raise Exception("Unknown note format: {0}".format(note))
Set the Note from a string representation Example: C4 should return middle C on a piano string: C4 octave: 4 offset: 0 base_name: C accidentals: 0
625941c1d7e4931a7ee9de94
def getRootJobs(self) -> Set['Job']: <NEW_LINE> <INDENT> roots = set(self._registry.keys()) <NEW_LINE> for job in self._registry.values(): <NEW_LINE> <INDENT> for otherID in job.description.successorsAndServiceHosts(): <NEW_LINE> <INDENT> if otherID in roots: <NEW_LINE> <INDENT> roots.remove(otherID) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return {self._registry[jid] for jid in roots}
Returns the set of root job objects that contain this job. A root job is a job with no predecessors (i.e. which are not children, follow-ons, or services). Only deals with jobs created here, rather than loaded from the job store.
625941c1eab8aa0e5d26dacf
def __new__(self, *args, **kargs): <NEW_LINE> <INDENT> if self._instance is None: <NEW_LINE> <INDENT> self._instance = object.__new__(self, *args, **kargs) <NEW_LINE> self._tables = {} <NEW_LINE> self._labels = [] <NEW_LINE> <DEDENT> return self._instance
Replacement method for implementing the singleton design pattern.
625941c16fece00bbac2d6b4
def _filter_request(self, fp, request): <NEW_LINE> <INDENT> if fp in self.__filter_set: <NEW_LINE> <INDENT> logger.info("Filter Request [{}] <{}>".format(request.method, request.url)) <NEW_LINE> return False <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return True
Use set filter request
625941c14a966d76dd550f85
def dfa(x, scale_lim=[5,9], scale_dens=0.25, show=False): <NEW_LINE> <INDENT> y = np.cumsum(x - np.mean(x)) <NEW_LINE> scales = (2**np.arange(scale_lim[0], scale_lim[1], scale_dens)).astype(np.int) <NEW_LINE> fluct = np.zeros(len(scales)) <NEW_LINE> for e, sc in enumerate(scales): <NEW_LINE> <INDENT> fluct[e] = np.sqrt(np.mean(calc_rms(y, sc)**2)) <NEW_LINE> <DEDENT> coeff = np.polyfit(np.log2(scales), np.log2(fluct), 1) <NEW_LINE> if show: <NEW_LINE> <INDENT> fluctfit = 2**np.polyval(coeff,np.log2(scales)) <NEW_LINE> plt.loglog(scales, fluct, 'bo') <NEW_LINE> plt.loglog(scales, fluctfit, 'r', label=r'$\alpha$ = %0.2f'%coeff[0]) <NEW_LINE> plt.title('DFA') <NEW_LINE> plt.xlabel(r'$\log_{10}$(time window)') <NEW_LINE> plt.ylabel(r'$\log_{10}$<F(t)>') <NEW_LINE> plt.legend() <NEW_LINE> plt.show() <NEW_LINE> <DEDENT> return scales, fluct, coeff[0]
Detrended Fluctuation Analysis - measures power law scaling coefficient of the given signal *x*. More details about the algorithm you can find e.g. here: Hardstone, R. et al. Detrended fluctuation analysis: A scale-free view on neuronal oscillations, (2012). Args: ----- *x* : numpy.array one dimensional data vector *scale_lim* = [5,9] : list of length 2 boundaries of the scale, where scale means windows among which RMS is calculated. Numbers from list are exponents of 2 to the power of X, eg. [5,9] is in fact [2**5, 2**9]. You can think of it that if your signal is sampled with F_s = 128 Hz, then the lowest considered scale would be 2**5/128 = 32/128 = 0.25, so 250 ms. *scale_dens* = 0.25 : float density of scale divisions, eg. for 0.25 we get 2**[5, 5.25, 5.5, ... ] *show* = False if True it shows matplotlib log-log plot. Returns: -------- *scales* : numpy.array vector of scales (x axis) *fluct* : numpy.array fluctuation function values (y axis) *alpha* : float estimation of DFA exponent
625941c11d351010ab855a94
def test_enforcing_reading_byteorder(self): <NEW_LINE> <INDENT> tr = Trace(data=np.arange(10, dtype="int32")) <NEW_LINE> memfile = compatibility.BytesIO() <NEW_LINE> tr.write(memfile, format="mseed", byteorder="<") <NEW_LINE> memfile.seek(0, 0) <NEW_LINE> tr2 = read(memfile, header_byteorder="<")[0] <NEW_LINE> memfile.seek(0, 0) <NEW_LINE> self.assertEqual(tr2.stats.mseed.byteorder, "<") <NEW_LINE> del tr2.stats.mseed <NEW_LINE> del tr2.stats._format <NEW_LINE> self.assertEqual(tr, tr2) <NEW_LINE> self.assertRaises(ValueError, read, memfile, header_byteorder=">") <NEW_LINE> memfile = compatibility.BytesIO() <NEW_LINE> tr.write(memfile, format="mseed", byteorder=">") <NEW_LINE> memfile.seek(0, 0) <NEW_LINE> tr2 = read(memfile, header_byteorder=">")[0] <NEW_LINE> memfile.seek(0, 0) <NEW_LINE> self.assertEqual(tr2.stats.mseed.byteorder, ">") <NEW_LINE> del tr2.stats.mseed <NEW_LINE> del tr2.stats._format <NEW_LINE> self.assertEqual(tr, tr2) <NEW_LINE> self.assertRaises(ValueError, read, memfile, header_byteorder="<")
Tests if setting the byteorder of the header for reading is passed to the C functions. Quite simple. It just checks if reading with the correct byteorder works and reading with the wrong byteorder fails.
625941c1ff9c53063f47c16c
def on_star_click(self, cell, model, path, iter): <NEW_LINE> <INDENT> entry = model.get(iter, 0)[0] <NEW_LINE> if self.has_star(entry): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.add_star(entry) <NEW_LINE> <DEDENT> model.row_changed(Gtk.TreePath(path), iter) <NEW_LINE> self.emit('star', model, iter)
Star the song associated with the clicked cell. Args: cell: the tree view cell that was clicked model: the songs model path: Unknown iter: the songs model iter reference for the clicked song
625941c1167d2b6e31218b0d
def find_user_id(self, force=False): <NEW_LINE> <INDENT> self.user_id = "unkown" <NEW_LINE> raise NotImplementedError()
finds the user id of a user and caches it. If a chaced value is ther it will use that. If you specify force, it will regenerate it
625941c182261d6c526ab414
def test_reformat_weburl_2(self): <NEW_LINE> <INDENT> url = '' <NEW_LINE> self.assertEqual(self.cmd.reformat_weburl(url), 'Not available')
Testing reformat_url, empty string
625941c1ec188e330fd5a71b
def __init__(self, y=0, nu=1): <NEW_LINE> <INDENT> self.ynu = np.array([y, nu], dtype=float)
Args: y: distance off axis for start of ray nu: angle from axial direction (radians)
625941c1167d2b6e31218b0e
def get_default_config(self): <NEW_LINE> <INDENT> config = super(SignalfxHandler, self).get_default_config() <NEW_LINE> config.update({ 'url': 'https://ingest.signalfx.com/v2/datapoint', 'batch': 300, 'batch_max_interval': 10, 'auth_token': '', }) <NEW_LINE> return config
Return the default config for the handler
625941c185dfad0860c3add1
def delayed_redraw(self): <NEW_LINE> <INDENT> with self._defer_lock: <NEW_LINE> <INDENT> whence = self._defer_whence <NEW_LINE> self._defer_whence = self._defer_whence_reset <NEW_LINE> flag = self._defer_flag <NEW_LINE> self._defer_flag = False <NEW_LINE> <DEDENT> if flag: <NEW_LINE> <INDENT> self.redraw_now(whence=whence)
Handle delayed redrawing of the canvas.
625941c14f6381625f1149b4
def addFixedEffect(self,F=None,A=None,Ftest=None): <NEW_LINE> <INDENT> if A is None: <NEW_LINE> <INDENT> A = sp.eye(self.P) <NEW_LINE> <DEDENT> if F is None: <NEW_LINE> <INDENT> F = sp.ones((self.N,1)) <NEW_LINE> if self.Ntest is not None: <NEW_LINE> <INDENT> Ftest = sp.ones((self.Ntest,1)) <NEW_LINE> <DEDENT> <DEDENT> assert A.shape[1]==self.P, 'VarianceDecomposition:: A has incompatible shape' <NEW_LINE> assert F.shape[0]==self.N, 'VarianceDecimposition:: F has incompatible shape' <NEW_LINE> for m in range(F.shape[1]): <NEW_LINE> <INDENT> self.vd.addFixedEffTerm(A,F[:,m:m+1]) <NEW_LINE> self.n_fixedEffs += 1 <NEW_LINE> <DEDENT> if Ftest is not None: <NEW_LINE> <INDENT> assert self.Ntest is not None, 'VarianceDecomposition:: specify Ntest for predictions (method VarianceDecomposition::setTestSampleSize)' <NEW_LINE> assert Ftest.shape[0]==self.Ntest, 'VarianceDecimposition:: Ftest has incompatible shape' <NEW_LINE> assert Ftest.shape[1]==F.shape[1], 'VarianceDecimposition:: Ftest has incompatible shape' <NEW_LINE> for m in range(Ftest.shape[1]): <NEW_LINE> <INDENT> self.Fstar.append(Ftest[:,m:m+1]) <NEW_LINE> <DEDENT> <DEDENT> self.gp = None <NEW_LINE> self.init = False <NEW_LINE> self.fast = False <NEW_LINE> self.optimum = None <NEW_LINE> self.cache['Sigma'] = None <NEW_LINE> self.cache['Hessian'] = None
add fixed effect term to the model Args: F: sample design matrix for the fixed effect [N,K] A: trait design matrix for the fixed effect (e.g. sp.ones((1,P)) common effect; sp.eye(P) any effect) [L,P] Ftest: sample design matrix for test samples [Ntest,K]
625941c1627d3e7fe0d68dc6
def list(self, driver_type=None, detail=None): <NEW_LINE> <INDENT> filters = [] <NEW_LINE> if driver_type is not None: <NEW_LINE> <INDENT> filters.append('type=%s' % driver_type) <NEW_LINE> <DEDENT> if detail is not None: <NEW_LINE> <INDENT> filters.append('detail=%s' % detail) <NEW_LINE> <DEDENT> path = '' <NEW_LINE> if filters: <NEW_LINE> <INDENT> path = '?' + '&'.join(filters) <NEW_LINE> <DEDENT> return self._list(self._path(path), self._resource_name)
Retrieve a list of drivers. :param driver_type: Optional, string to filter the drivers by type. Value should be 'classic' or 'dynamic'. :param detail: Optional, flag whether to return detailed information about drivers. Default is None means not to send the arg to the server due to older versions of the server cannot handle filtering on detail. :returns: A list of drivers.
625941c1b57a9660fec337fa
def worker_remove(self): <NEW_LINE> <INDENT> for queues in self.removed_output: <NEW_LINE> <INDENT> queue = self.removed_output[queues] <NEW_LINE> if queue.qsize() and not len(self.writer_threads['removed']): <NEW_LINE> <INDENT> if queues in ('Audio', 'MusicArtist', 'AlbumArtist', 'MusicAlbum'): <NEW_LINE> <INDENT> new_thread = RemovedWorker(queue, self.music_database_lock, "music", self.server, self.direct_path) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> new_thread = RemovedWorker(queue, self.database_lock, "video", self.server, self.direct_path) <NEW_LINE> <DEDENT> LOG.info("-->[ q:removed/%s/%s ]", queues, id(new_thread)) <NEW_LINE> self.writer_threads['removed'].append(new_thread) <NEW_LINE> self.enable_pending_refresh()
Remove items from the Kodi database.
625941c1d10714528d5ffc59
def get_js(self): <NEW_LINE> <INDENT> self.response.write( 'config = ' + json.dumps( self.get(), sort_keys=True, indent=4, separators=(',', ': '), default=encoder.custom_json_serializer,) + ';' )
Return scitran config in javascript format.
625941c17b180e01f3dc4779
def clustering(G,nbunch=None,with_labels=False,weights=False): <NEW_LINE> <INDENT> if G.is_directed(): <NEW_LINE> <INDENT> raise NetworkXError("Clustering algorithms are not defined for directed graphs.") <NEW_LINE> <DEDENT> if with_labels and weights: <NEW_LINE> <INDENT> clusterc={} <NEW_LINE> weights={} <NEW_LINE> for v,d,t in _triangles_and_degree_iter(G,nbunch): <NEW_LINE> <INDENT> weights[v]=float(d*(d-1)) <NEW_LINE> if t==0: <NEW_LINE> <INDENT> clusterc[v]=0.0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> clusterc[v]=t/float(d*(d-1)) <NEW_LINE> <DEDENT> <DEDENT> scale=1./sum(weights.itervalues()) <NEW_LINE> for v,w in weights.iteritems(): <NEW_LINE> <INDENT> weights[v]=w*scale <NEW_LINE> <DEDENT> return clusterc,weights <NEW_LINE> <DEDENT> clusterc={} <NEW_LINE> for v,d,t in _triangles_and_degree_iter(G,nbunch): <NEW_LINE> <INDENT> if t==0: <NEW_LINE> <INDENT> clusterc[v]=0.0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> clusterc[v]=t/float(d*(d-1)) <NEW_LINE> <DEDENT> <DEDENT> if with_labels: <NEW_LINE> <INDENT> return clusterc <NEW_LINE> <DEDENT> elif nbunch in G: <NEW_LINE> <INDENT> return clusterc.values()[0] <NEW_LINE> <DEDENT> return clusterc.values()
Compute the clustering coefficient for nodes. For each node find the fraction of possible triangles that exist, .. math:: c_v = \frac{2 T(v)}{deg(v)(deg(v)-1)} where :math:`T(v)` is the number of triangles through node :math:`v`. Parameters ---------- G : graph A networkx graph nbunch : container of nodes, optional Limit to specified nodes. Default is entire graph. with_labels: bool, optional If True return a dictionary keyed by node label. weights : bool, optional If True return fraction of connected triples as dictionary Returns ------- out : float, list, dictionary or tuple of dictionaries Clustering coefficient at specified nodes Examples -------- >>> G=nx.complete_graph(5) >>> print nx.clustering(G,0) 1.0 >>> print nx.clustering(G,with_labels=True) {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0} Notes ----- The weights are the fraction of connected triples in the graph which include the keyed node. Ths is useful for computing transitivity. Self loops are ignored.
625941c15510c4643540f361
def form_K(self): <NEW_LINE> <INDENT> self.b = np.zeros(self.Num_var) <NEW_LINE> for num, bi in enumerate(self.b_): <NEW_LINE> <INDENT> self.b[num * 3:num * 3 + 6] += bi <NEW_LINE> <DEDENT> self.b = np.matmul(np.matmul(np.transpose(self.tran_m), self.b), self.tran_m) <NEW_LINE> self.Koc = np.zeros((self.Num_var, self.Num_var)) <NEW_LINE> for num in range(self.num_ele): <NEW_LINE> <INDENT> self.Koc[num * 3:num * 3 + 6, num * 3:num * 3 + 6] += self.K_ <NEW_LINE> <DEDENT> self.K = np.matmul(np.matmul(np.transpose(self.tran_m), self.Koc), self.tran_m) <NEW_LINE> Naming = [] <NEW_LINE> for i in range(self.num_ele + 1): <NEW_LINE> <INDENT> Naming.append([self.id, 'u', i, '_']) <NEW_LINE> Naming.append([self.id, 'w', i, '_']) <NEW_LINE> Naming.append([self.id, 'fi', i, '_']) <NEW_LINE> <DEDENT> self.Naming_fe = np.array(Naming, dtype='U25') <NEW_LINE> name = "" <NEW_LINE> for sup in self.supports_1: <NEW_LINE> <INDENT> name = name+sup.id+" " <NEW_LINE> <DEDENT> self.Naming_fe[0:3, 3] = name <NEW_LINE> name = "" <NEW_LINE> for sup in self.supports_2: <NEW_LINE> <INDENT> name = name+sup.id+" " <NEW_LINE> <DEDENT> self.Naming_fe[-3:, 3] = name
Forms main stifness matrix and main loaing vector for beam/truss.
625941c1bde94217f3682d6b
@step('I accept the alert') <NEW_LINE> def accept_alert(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> alert = Alert(world.browser) <NEW_LINE> alert.accept() <NEW_LINE> <DEDENT> except WebDriverException: <NEW_LINE> <INDENT> pass
Accept the alert.
625941c18c3a873295158330
def kickLoc(self): <NEW_LINE> <INDENT> my = self.brain.my <NEW_LINE> if (my.h <= 45. and my.h >= -45.): <NEW_LINE> <INDENT> return self.chooseDynamicKick() <NEW_LINE> <DEDENT> elif (my.h <= 135. and my.h > 45.): <NEW_LINE> <INDENT> return kicks.LEFT_SIDE_KICK <NEW_LINE> <DEDENT> elif (my.h >= -135. and my.h < -45.): <NEW_LINE> <INDENT> return kicks.RIGHT_SIDE_KICK <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self.chooseBackKick()
returns kick using localization
625941c10a50d4780f666e08
def calculate_interval_statistics(self) -> dict: <NEW_LINE> <INDENT> number_of_intervals = len(self.intervals) <NEW_LINE> list_duration = [ (self.timestamps[item[-1]] - self.timestamps[item[0]] ).total_seconds() for item in self.intervals ] <NEW_LINE> min_duration_interval = min(list_duration) <NEW_LINE> max_duration_interval = max(list_duration) <NEW_LINE> avg_duration_interval = (sum(list_duration) / len(list_duration)) <NEW_LINE> list_distance = [ self.distances[item[-1]] - self.distances[item[0]] for item in self.intervals ] <NEW_LINE> min_distance_interval = min(list_distance) <NEW_LINE> max_distance_interval = max(list_distance) <NEW_LINE> avg_distance_interval = sum(list_distance) / len( list_distance ) <NEW_LINE> data = { 'number_of_intervals': number_of_intervals, 'min_duration': min_duration_interval, 'max_duration': max_duration_interval, 'avg_duration': avg_duration_interval, 'min_distance': min_distance_interval, 'max_distance': max_distance_interval, 'avg_distance': avg_distance_interval, } <NEW_LINE> return data
Method for calculating interval statistics. Returns: data = { 'number_of_intervals': number_of_intervals, 'min_duration': min_duration_interval, 'max_duration': max_duration_interval, 'avg_duration': avg_duration_interval, 'min_distance': min_distance_interval, 'max_distance': max_distance_interval, 'avg_distance': avg_distance_interval, }
625941c1cb5e8a47e48b7a25
def testMismatches(self): <NEW_LINE> <INDENT> cat1 = afwTable.SourceCatalog(self.table) <NEW_LINE> cat2 = afwTable.SourceCatalog(self.table) <NEW_LINE> nobj = 100 <NEW_LINE> for i in range(nobj): <NEW_LINE> <INDENT> s1 = cat1.addNew() <NEW_LINE> s2 = cat2.addNew() <NEW_LINE> s1.setId(i) <NEW_LINE> s2.setId(i) <NEW_LINE> s1.set(afwTable.SourceTable.getCoordKey().getRa(), (10 + 0.0001*i) * lsst.geom.degrees) <NEW_LINE> s2.set(afwTable.SourceTable.getCoordKey().getRa(), (10.005 + 0.0001*i) * lsst.geom.degrees) <NEW_LINE> s1.set(afwTable.SourceTable.getCoordKey().getDec(), (10 + 0.0001*i) * lsst.geom.degrees) <NEW_LINE> s2.set(afwTable.SourceTable.getCoordKey().getDec(), (10.005 + 0.0001*i) * lsst.geom.degrees) <NEW_LINE> <DEDENT> for closest in (True, False): <NEW_LINE> <INDENT> mc = afwTable.MatchControl() <NEW_LINE> mc.findOnlyClosest = closest <NEW_LINE> mc.includeMismatches = False <NEW_LINE> matches = afwTable.matchRaDec( cat1, cat2, 1.0*lsst.geom.arcseconds, mc) <NEW_LINE> mc.includeMismatches = True <NEW_LINE> matchesMismatches = afwTable.matchRaDec( cat1, cat2, 1.0*lsst.geom.arcseconds, mc) <NEW_LINE> catMatches = afwTable.SourceCatalog(self.table) <NEW_LINE> catMismatches = afwTable.SourceCatalog(self.table) <NEW_LINE> for m in matchesMismatches: <NEW_LINE> <INDENT> if m[1] is not None: <NEW_LINE> <INDENT> if not any(x == m[0] for x in catMatches): <NEW_LINE> <INDENT> catMatches.append(m[0]) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> catMismatches.append(m[0]) <NEW_LINE> <DEDENT> <DEDENT> if closest: <NEW_LINE> <INDENT> self.assertEqual(len(catMatches), len(matches)) <NEW_LINE> <DEDENT> matches2 = afwTable.matchRaDec( catMatches, cat2, 1.0*lsst.geom.arcseconds, mc) <NEW_LINE> self.assertEqual(len(matches), len(matches2)) <NEW_LINE> mc.includeMismatches = False <NEW_LINE> noMatches = afwTable.matchRaDec( catMismatches, cat2, 1.0*lsst.geom.arcseconds, mc) <NEW_LINE> self.assertEqual(len(noMatches), 0)
Chech that matchRaDec works as expected when using the includeMismatches option
625941c1f7d966606f6a9f7a
def fetch_userid(cookies: dict) -> str: <NEW_LINE> <INDENT> r = requests.head('https://www.imdb.com/profile', cookies=cookies) <NEW_LINE> r.raise_for_status() <NEW_LINE> m = re.search(r'ur\d+', r.headers['Location']) <NEW_LINE> if not m: <NEW_LINE> <INDENT> raise Exception("\n\nCan't log into IMDb.\n" f'Make sure that your IMDb cookie in {COOKIE_FNAME} is correct.\n' f'{README_REF}') <NEW_LINE> <DEDENT> return m.group()
User ID is required for exporting any lists. Cookie validity will also be checked here.
625941c126238365f5f0ede3
def filter_ngram(gram, mode='any'): <NEW_LINE> <INDENT> filtered = [filter_word(w) for w in gram] <NEW_LINE> if mode == 'any': <NEW_LINE> <INDENT> return any(filtered) <NEW_LINE> <DEDENT> elif mode == 'all': <NEW_LINE> <INDENT> return all(filtered) <NEW_LINE> <DEDENT> elif mode == 'ends': <NEW_LINE> <INDENT> return filtered[0] or filtered[-1] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError('Invalid mode: %s' % mode)
Decide whether to keep or discard an n-gram. Args: gram: list of tokens (length N) mode: Option to throw out ngram if 'any': any single token passes filter_word 'all': all tokens pass filter_word 'ends': book-ended by filterable tokens
625941c1099cdd3c635f0bd4
def get_applied_substereotypes(self, stereotype=None): <NEW_LINE> <INDENT> raise NotImplementedError( "operation get_applied_substereotypes(...) not yet implemented" )
Retrieves the substereotypes of the specified stereotype that are applied to this element.
625941c17c178a314d6ef3d4
def testParallel_DelayedFailure(self): <NEW_LINE> <INDENT> after = 5 <NEW_LINE> tree = owyl.parallel(owyl.succeed(), owyl.failAfter(after=after), policy=owyl.PARALLEL_SUCCESS.REQUIRE_ALL) <NEW_LINE> v = owyl.visit(tree) <NEW_LINE> results = [x for x in v if x is not None] <NEW_LINE> self.assertEqual(results, [False]) <NEW_LINE> v = owyl.visit(tree) <NEW_LINE> results = [x for x in v if x is not None] <NEW_LINE> self.assertEqual(results, [False])
Can parallel fail if child fails later (all succeed)?
625941c191af0d3eaac9b98e
def run(self): <NEW_LINE> <INDENT> pass
Run in background. To be implemented in child class.
625941c12c8b7c6e89b3573a
def transform(self, X, details=False): <NEW_LINE> <INDENT> X = self.preprocess(X) <NEW_LINE> p_y_given_x, _, log_z = self.calculate_latent(X, self.theta) <NEW_LINE> labels = self.label(p_y_given_x) <NEW_LINE> if details == 'surprise': <NEW_LINE> <INDENT> n_samples = X.shape[0] <NEW_LINE> alpha = np.zeros((self.n_hidden, self.n_visible)) <NEW_LINE> for i in range(self.n_visible): <NEW_LINE> <INDENT> alpha[np.argmax(self.alpha[:, i]), i] = 1 <NEW_LINE> <DEDENT> log_p = np.empty((2, n_samples, self.n_hidden)) <NEW_LINE> c0 = np.einsum('ji,ij->j', alpha, self.theta[0]) <NEW_LINE> c1 = np.einsum('ji,ij->j', alpha, self.theta[1]) <NEW_LINE> info0 = np.einsum('ji,ij->ij', alpha, self.theta[2] - self.theta[0]) <NEW_LINE> info1 = np.einsum('ji,ij->ij', alpha, self.theta[3] - self.theta[1]) <NEW_LINE> log_p[1] = c1 + X.dot(info1) <NEW_LINE> log_p[0] = c0 + X.dot(info0) <NEW_LINE> surprise = [-np.sum([log_p[labels[l, j], l, j] for j in range(self.n_hidden)]) for l in range(n_samples)] <NEW_LINE> return p_y_given_x, log_z, np.array(surprise) <NEW_LINE> <DEDENT> elif details: <NEW_LINE> <INDENT> return p_y_given_x, log_z <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return pd.DataFrame(labels)
Label hidden factors for (possibly previously unseen) samples of data. Parameters: samples of data, X, shape = [n_samples, n_visible] Returns: , shape = [n_samples, n_hidden]
625941c1be383301e01b5402
def getUsersSongData(self, userID): <NEW_LINE> <INDENT> self.cursor.execute( "SELECT Song_ID, Rating FROM User_Song WHERE User_ID = ? ;", (userID, )) <NEW_LINE> songs = self.cursor.fetchone() <NEW_LINE> songList = [] <NEW_LINE> while songs: <NEW_LINE> <INDENT> songList.append([songs[0], songs[1]]) <NEW_LINE> songs = self.cursor.fetchone() <NEW_LINE> <DEDENT> cleanData = {'Key': [], 'Mode': [], 'Acousticness': [], 'Danceability': [], 'Energy': [], 'Instrumentalness': [ ], 'Liveness': [], 'Loudness': [], 'Speechiness': [], 'Valence': [], 'Tempo': [], 'Rating': []} <NEW_LINE> for song in songList: <NEW_LINE> <INDENT> self.cursor.execute( "SELECT * FROM Song WHERE Song_ID = ? ;", (song[0], )) <NEW_LINE> data = self.cursor.fetchone() <NEW_LINE> if data: <NEW_LINE> <INDENT> cleanData['Key'].append(data[1]) <NEW_LINE> cleanData['Mode'].append(data[2]) <NEW_LINE> cleanData['Acousticness'].append(data[3]) <NEW_LINE> cleanData['Danceability'].append(data[4]) <NEW_LINE> cleanData['Energy'].append(data[5]) <NEW_LINE> cleanData['Instrumentalness'].append(data[6]) <NEW_LINE> cleanData['Liveness'].append(data[7]) <NEW_LINE> cleanData['Loudness'].append(data[8]) <NEW_LINE> cleanData['Speechiness'].append(data[9]) <NEW_LINE> cleanData['Valence'].append(data[10]) <NEW_LINE> cleanData['Tempo'].append(data[11]) <NEW_LINE> cleanData['Rating'].append(song[1]) <NEW_LINE> <DEDENT> <DEDENT> if len(cleanData['Key']) == 0: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> clean = pd.DataFrame(cleanData) <NEW_LINE> return clean
Method to get list of songs and their ratings related to a specific user Input: userID (int) Output: cleaned ND array of songs data (pandas dataframe), or None if there is no data
625941c157b8e32f52483412
def valid_passports2(passports, required_fields): <NEW_LINE> <INDENT> valid = 0 <NEW_LINE> for passport in passports: <NEW_LINE> <INDENT> if required_fields.issubset(set(passport.keys())): <NEW_LINE> <INDENT> byr = passport["byr"] <NEW_LINE> if len(byr)!=4 or int(byr) < 1920 or int(byr) > 2002: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> iyr = passport["iyr"] <NEW_LINE> if len(iyr)!=4 or int(iyr) < 2010 or int(iyr) > 2020: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> eyr = passport["eyr"] <NEW_LINE> if len(eyr)!=4 or int(eyr) < 2020 or int(eyr) > 2030: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> hgt = passport["hgt"] <NEW_LINE> if "cm" in hgt: <NEW_LINE> <INDENT> cm = int(re.findall(r"\d+", hgt)[0]) <NEW_LINE> if cm < 150 or cm > 193: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> elif "in" in hgt: <NEW_LINE> <INDENT> inch = int(re.findall(r"\d+", hgt)[0]) <NEW_LINE> if inch < 59 or inch > 76: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> hcl = passport["hcl"] <NEW_LINE> if not re.fullmatch(r"#[0-9a-f]{6}", hcl): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> ecl = passport["ecl"] <NEW_LINE> valid_colors = ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"] <NEW_LINE> if ecl not in valid_colors: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> pid = passport["pid"] <NEW_LINE> if not re.fullmatch(r"^[0-9]{9}", pid): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> valid += 1 <NEW_LINE> <DEDENT> <DEDENT> return valid
New rules for part 2: byr (Birth Year) - four digits; at least 1920 and at most 2002. iyr (Issue Year) - four digits; at least 2010 and at most 2020. eyr (Expiration Year) - four digits; at least 2020 and at most 2030. hgt (Height) - a number followed by either cm or in: If cm, the number must be at least 150 and at most 193. If in, the number must be at least 59 and at most 76. hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f. ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth. pid (Passport ID) - a nine-digit number, including leading zeroes. cid (Country ID) - ignored, missing or not.
625941c185dfad0860c3add2
def test_pass_none(self): <NEW_LINE> <INDENT> self.assertFilterPasses(None)
``None`` always passes this Filter. Use `Required | Date` if you want to reject null values.
625941c1c432627299f04bbc
def set_dielectric_constant (self, e_c): <NEW_LINE> <INDENT> self.dielectric_constant = e_c
Set the dielectric constant of water
625941c121bff66bcd6848cd
def InitOrientationVars(self)->None: <NEW_LINE> <INDENT> Logger.debug( u'Setting Orientation Variables #1: Screen Size: [%s], Width: [%s], Height: [%s], Orientation: [%s]' % ( str(Globals.fScreenSize), str(self._app_window._size[0]), str(self._app_window._size[1]), str(Globals.uDeviceOrientation))) <NEW_LINE> OS_GetWindowSize() <NEW_LINE> if Globals.iAppWidth < Globals.iAppHeight: <NEW_LINE> <INDENT> Globals.uDeviceOrientation = 'portrait' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> Globals.uDeviceOrientation = 'landscape' <NEW_LINE> <DEDENT> Globals.oRotation.Lock() <NEW_LINE> SetVar(uVarName = u'DEVICEORIENTATION', oVarValue = Globals.uDeviceOrientation) <NEW_LINE> SetVar(uVarName = u'SCREENSIZE', oVarValue = str(Globals.fScreenSize)) <NEW_LINE> Logger.debug(u'Setting Orientation Variables: Screen Size: [%s], Width: [%s], Height: [%s], Orientation: [%s]' % (str(Globals.fScreenSize), str(Globals.iAppWidth), str(Globals.iAppHeight), str(Globals.uDeviceOrientation)))
Getting the orientation of the App and sets to system vars for it
625941c13c8af77a43ae3716
def set_correct_acqiris_time(self, correct_time=True): <NEW_LINE> <INDENT> self.correct_time = correct_time
On/off correction of time for acqiris
625941c14428ac0f6e5ba769
def populate_or_delete_offer_enrollment_education_group_year_id(apps, schema_editor): <NEW_LINE> <INDENT> OfferEnrollment = apps.get_model('base', 'offerenrollment') <NEW_LINE> EducationGroupYear = apps.get_model('base', 'educationgroupyear') <NEW_LINE> off_enrollments_without_educ_group = OfferEnrollment.objects.filter( education_group_year_id__isnull=True ).select_related( 'offer_year__academic_year' ) <NEW_LINE> deleted = set() <NEW_LINE> for obj in off_enrollments_without_educ_group: <NEW_LINE> <INDENT> education_group_year_id = EducationGroupYear.objects.filter( acronym=obj.offer_year.acronym, academic_year=obj.offer_year.academic_year, ).values_list('pk', flat=True).first() <NEW_LINE> if not education_group_year_id: <NEW_LINE> <INDENT> deleted.add('Removing all offerenrollments of {} in {}'.format(obj.offer_year.acronym, obj.offer_year.academic_year.year)) <NEW_LINE> obj.delete() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> obj.education_group_year_id = education_group_year_id <NEW_LINE> obj.save() <NEW_LINE> <DEDENT> <DEDENT> for msg in sorted(deleted): <NEW_LINE> <INDENT> print(msg)
Same script as migration file 0576 in Osis
625941c123e79379d52ee4de
def _get_price_table(price_table_uri, start_year, end_year): <NEW_LINE> <INDENT> price_dict = utils.build_lookup_from_csv(price_table_uri, 'year') <NEW_LINE> try: <NEW_LINE> <INDENT> return numpy.array([price_dict[year]['price'] for year in xrange(start_year, end_year+1)]) <NEW_LINE> <DEDENT> except KeyError as missing_year: <NEW_LINE> <INDENT> raise KeyError('Carbon price table does not contain a price value for ' '%s' % missing_year)
Get price table. Parameters: price_table_uri (str): filepath to price table csv file start_year (int): start year of analysis end_year (int): end year of analysis Returns: price_t (numpy.array): price for each year.
625941c15fc7496912cc38f6