content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
import castle from typing import Tuple if __name__ == '__main__': main()
[ 11748, 16669, 198, 198, 6738, 19720, 1330, 309, 29291, 628, 628, 628, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1388, 3419, 198 ]
2.71875
32
#!/usr/bin/env python3 # Copyright 2018 Brocade Communications Systems LLC. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may also obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :mod:`switch_configuration_show` - PyFOS util for configuring switch operation ******************************************************************************** The :mod:`switch_configuration_show` util provides for configuring switch \ operation. This module is a stand-alone script that can be used to display switch attributes. * Input: * -L=<login>: The login ID. If not provided, an interactive prompt will request one. * -P=<password>: The password. If not provided, an interactive prompt will request one. * -i=<IP address>: The IP address. * -f=<VFID>: The VFID or -1 if VF is disabled. If unspecified, a VFID of 128 is assumed. * Output: * The switch attributes in JSON format. .. function:: show_switch_conf(session) Example Usage of the Method:: ret = switch_configuration_show.show_switch_conf(session) print (ret) Details:: switch_conf_obj = switch_configuration() result = switch_conf_obj.get(session) return result * Input: :param session: The session returned by login. * Output: :rtype: A dictionary of return status matching the REST response. *Use Cases* 1. Retrieve the configuration parameters of the switch. """ import sys from pyfos import pyfos_auth import pyfos.pyfos_brocade_fibrechannel_configuration as py_fc from pyfos import pyfos_util from pyfos.utils import brcd_util switch = py_fc.switch_configuration if __name__ == "__main__": main(sys.argv[1:])
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 2, 15069, 2864, 2806, 46395, 14620, 11998, 11419, 13, 220, 1439, 2489, 10395, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 3415...
3.029891
736
from django.shortcuts import render,redirect,reverse from . import forms,models from django.db.models import Sum from django.contrib.auth.models import Group from django.http import HttpResponseRedirect from django.contrib.auth.decorators import login_required,user_passes_test from django.conf import settings from django.db.models import Q #for showing signup/login button for customer #for showing signup/login button for mechanics #for showing signup/login button for ADMIN(by sumit) #for checking user customer, mechanic or admin(by sumit) #============================================================================================ # ADMIN RELATED views start #============================================================================================ #============================================================================================ # ADMIN RELATED views END #============================================================================================ #============================================================================================ # CUSTOMER RELATED views start #============================================================================================ #============================================================================================ # CUSTOMER RELATED views END #============================================================================================ #============================================================================================ # MECHANIC RELATED views start #============================================================================================ #============================================================================================ # MECHANIC RELATED views start #============================================================================================ # for aboutus and contact def aboutus_view(request): return render(request,'vehicle/aboutus.html') def contactus_view(request): sub = forms.ContactusForm() if request.method == 'POST': sub = forms.ContactusForm(request.POST) if sub.is_valid(): email = sub.cleaned_data['Email'] name=sub.cleaned_data['Name'] message = sub.cleaned_data['Message'] send_mail(str(name)+' || '+str(email),message,settings.EMAIL_HOST_USER, settings.EMAIL_RECEIVING_USER, fail_silently = False) return render(request, 'vehicle/contactussuccess.html') return render(request, 'vehicle/contactus.html', {'form':sub})
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 11, 445, 1060, 11, 50188, 198, 6738, 764, 1330, 5107, 11, 27530, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, 1330, 5060, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, ...
4.437819
587
import random import numpy as np import itertools import re from collections import defaultdict import os def convert_to_simple_label(tag, rep="disf1_uttseg"): """Takes the complex tag set and gives back the simple, smaller version with ten tags: """ disftag = "<f/>" if "<rm-" in tag: disftag = "<rm-0/>" elif "<e" in tag: disftag = "<e/>" if "uttseg" in rep: # if combined task with TTO m = re.search(r'<[ct]*/>', tag) if m: return disftag + m.group(0) else: print("WARNING NO TAG", +tag) return "" return disftag # if not TT0 def convert_from_full_tag_set_to_idx(tag, rep, idx_to_label): """Maps from the full tag set of trp repairs to the new dictionary""" if "simple" in rep: tag = convert_to_simple_label(tag) for k, v in idx_to_label.items(): if v in tag: # a substring relation return k def add_word_continuation_tags(tags): """In place, add a continutation tag to each word: <cc/> -word continues current dialogue act and the next word will also continue it <ct/> -word continues current dialogue act and is the last word of it <tc/> -word starts this dialogue act tag and the next word continues it <tt/> -word starts and ends dialogue act (single word dialogue act) """ tags = list(tags) for i in range(0, len(tags)): if i == 0: tags[i] = tags[i] + "<t" else: tags[i] = tags[i] + "<c" if i == len(tags)-1: tags[i] = tags[i] + "t/>" else: tags[i] = tags[i] + "c/>" return tags def verify_disfluency_tags(tags, normalize_ID=False): """Check that the repair tags sequence is valid. Keyword arguments: normalize_ID -- boolean, whether to convert the repair ID numbers to be derivable from their unique RPS position in the utterance. """ id_map = dict() # map between old ID and new ID # in first pass get old and new IDs for i in range(0, len(tags)): rps = re.findall("<rps id\=\"[0-9]+\"\/>", tags[i]) if rps: id_map[rps[0][rps[0].find("=")+2:-3]] = str(i) # key: old repair ID, value, list [reparandum,interregnum,repair] # all True when repair is all there repairs = defaultdict(list) for r in id_map.keys(): repairs[r] = [None, None, None] # three valued None<False<True # print(repairs) # second pass verify the validity of the tags # and (optionally) modify the IDs for i in range(0, len(tags)): # iterate over all tag strings new_tags = [] if tags[i] == "": assert(all([repairs[ID][2] or repairs[ID] == [None, None, None] for ID in repairs.keys()])),\ "Unresolved repairs at fluent tag\n\t" + str(repairs) for tag in get_tags(tags[i]): # iterate over all tags # print(i) # print(tag) if tag == "<e/>": new_tags.append(tag) continue ID = tag[tag.find("=")+2:-3] if "<rms" in tag: assert repairs[ID][0] == None,\ "reparandum started parsed more than once " + ID assert repairs[ID][1] == None,\ "reparandum start again during interregnum phase " + ID assert repairs[ID][2] == None,\ "reparandum start again during repair phase " + ID repairs[ID][0] = False # set in progress elif "<rm " in tag: assert repairs[ID][0] != None,\ "mid reparandum tag before reparandum start " + ID assert repairs[ID][2] == None,\ "mid reparandum tag in a interregnum phase or beyond " + ID assert repairs[ID][2] == None,\ "mid reparandum tag in a repair phase or beyond " + ID elif "<i" in tag: assert repairs[ID][0] != None,\ "interregnum start before reparandum start " + ID assert repairs[ID][2] == None,\ "interregnum in a repair phase " + ID if repairs[ID][1] == None: # interregnum not reached yet repairs[ID][0] = True # reparandum completed repairs[ID][1] = False # interregnum in progress elif "<rps" in tag: assert repairs[ID][0] != None,\ "repair start before reparandum start " + ID assert repairs[ID][1] != True,\ "interregnum over before repair start " + ID assert repairs[ID][2] == None,\ "repair start parsed twice " + ID repairs[ID][0] = True # reparanudm complete repairs[ID][1] = True # interregnum complete repairs[ID][2] = False # repair in progress elif "<rp " in tag: assert repairs[ID][0] == True,\ "mid repair word start before reparandum end " + ID assert repairs[ID][1] == True,\ "mid repair word start before interregnum end " + ID assert repairs[ID][2] == False,\ "mid repair tag before repair start tag " + ID elif "<rpn" in tag: # make sure the rps is order in tag string is before assert repairs[ID][0] == True,\ "repair end before reparandum end " + ID assert repairs[ID][1] == True,\ "repair end before interregnum end " + ID assert repairs[ID][2] == False,\ "repair end before repair start " + ID repairs[ID][2] = True # do the replacement of the tag's ID after checking new_tags.append(tag.replace(ID, id_map[ID])) if normalize_ID: tags[i] = "".join(new_tags) assert all([repairs[ID][2] for ID in repairs.keys()]),\ "Unresolved repairs:\n\t" + str(repairs) def shuffle(lol, seed): """Shuffle inplace each list in the same order. lol :: list of list as input seed :: seed the shuffling """ for l in lol: random.seed(seed) random.shuffle(l) def minibatch(l, bs): """Returns a list of minibatches of indexes which size is equal to bs border cases are treated as follow: eg: [0,1,2,3] and bs = 3 will output: [[0],[0,1],[0,1,2],[1,2,3]] l :: list of word idxs """ out = [l[:i] for i in xrange(1, min(bs, len(l)+1))] out += [l[i-bs:i] for i in xrange(bs, len(l)+1)] assert len(l) == len(out) return out def indices_from_length(sentence_length, bs, start_index=0): """Return a list of indexes pairs (start/stop) for each word max difference between start and stop equal to bs border cases are treated as follow: eg: sentenceLength=4 and bs = 3 will output: [[0,0],[0,1],[0,2],[1,3]] """ l = map(lambda x: start_index+x, xrange(sentence_length)) out = [] for i in xrange(0, min(bs, len(l))): out.append([l[0], l[i]]) for i in xrange(bs+1, len(l)+1): out.append([l[i-bs], l[i-1]]) assert len(l) == sentence_length return out def context_win(l, win): """Return a list of list of indexes corresponding to context windows surrounding each word in the sentence given a list of indexes composing a sentence. win :: int corresponding to the size of the window """ assert (win % 2) == 1 assert win >= 1 l = list(l) lpadded = win/2 * [-1] + l + win/2 * [-1] out = [lpadded[i:i+win] for i in range(len(l))] assert len(out) == len(l) return out def context_win_backwards(l, win): '''Same as contextwin except only backwards context (i.e. like an n-gram model) ''' assert win >= 1 l = list(l) lpadded = (win-1) * [-1] + l out = [lpadded[i: i+win] for i in range(len(l))] assert len(out) == len(l) return out def corpus_to_indexed_matrix(my_array_list, win, bs, sentence=False): """Returns a matrix of contextwins for a list of utterances of dimensions win * n_words_in_corpus (i.e. total length of all arrays in my_array_list) and corresponding matrix of indexes (of just start/stop for each one) so 2 * n_words_in_corpus of where to access these, using bs (backprop distance) as the limiting history size """ sentences = [] # a list (of arrays, or lists?), returned as matrix indices = [] # a list of index pairs (arrays?), returned as matrix totalSize = 0 if sentence: for sent in my_array_list: mysent = np.asarray([-1] * (bs-1) + list(sent)) # padding with eos # get list of context windows mywords = context_win_backwards(mysent, win) # just one per utterance for now.. cindices = [[totalSize, totalSize+len(mywords)-1]] cwords = [] for i in range(bs, len(mywords)+1): words = list(itertools.chain(*mywords[(i-bs):i])) cwords.append(words) # always (bs * n) words long # print cwords sentences.extend(cwords) indices.extend(cindices) totalSize += len(cwords) else: for sentence in my_array_list: # get list of context windows cwords = context_win_backwards(sentence, win) cindices = indices_from_length(len(cwords), bs, totalSize) indices.extend(cindices) sentences.extend(cwords) totalSize += len(cwords) for s in sentences: if any([x is None for x in s]): print(s) return np.matrix(sentences, dtype='int32'), indices def convert_from_eval_tags_to_inc_disfluency_tags(tags, words, representation="disf1", limit=8): """Conversion from disfluency tagged corpus with xml-style tags as from STIR (https://bitbucket.org/julianhough/stir) to the strictly left-to-right schemas as described by Hough and Schlangen 2015 Interspeech paper, which are used by RNN architectures at runtime. Keyword arguments: tags -- the STIR eval style disfluency tags words -- the words in the utterance representation -- the number corresponding to the type of tagging system 1=standard, 2=rm-N values where N does not count intervening edit terms 3=same as 2 but with a 'c' tag after edit terms have ended. limit -- the limit on the distance back from the repair start """ repair_dict = defaultdict(list) new_tags = [] # print("tags") # print(tags) # print('words') # print(words) for t in range(0, len(tags)): if "uttseg" in representation: m = re.search(r'<[ct]*/>', tags[t]) if m: TTO_tag = m.group(0) tags[t] = tags[t].replace(TTO_tag, "") if "dact" in representation: m = re.search(r'<diact type="[^\s]*"/>', tags[t]) if m: dact_tag = m.group(0) tags[t] = tags[t].replace(dact_tag, "") if "laugh" in representation: m = re.search(r'<speechLaugh/>|<laughter/>', tags[t]) if m: laughter_tag = m.group(0) else: laughter_tag = "<nolaughter/>" tags[t] = tags[t].replace(laughter_tag, "") current_tag = "" if "<e/>" in tags[t] or "<i" in tags[t]: current_tag = "<e/>" # TODO may make this an interregnum if "<rms" in tags[t]: rms = re.findall("<rms id\=\"[0-9]+\"\/>", tags[t], re.S) for r in rms: repairID = r[r.find("=")+2:-3] repair_dict[repairID] = [t, 0] if "<rps" in tags[t]: rps = re.findall("<rps id\=\"[0-9]+\"\/>", tags[t], re.S) for r in rps: repairID = r[r.find("=")+2:-3] # print('repairID') # print(repairID) # print(repair_dict.get(repairID)) # print(str(repairID)+str(tags)+str(words)) assert repair_dict.get(repairID), str(repairID)+str(tags)+str(words) repair_dict[repairID][1] = t dist = min(t-repair_dict[repairID][0], limit) # adjust in case the reparandum is shortened due to the limit repair_dict[repairID][0] = t-dist current_tag += "<rm-{}/>".format(dist) + "<rpMid/>" if "<rpn" in tags[t]: rpns = re.findall("<rpnrep id\=\"[0-9]+\"\/>", tags[t], re.S) +\ re.findall("<rpnsub id\=\"[0-9]+\"\/>", tags[t], re.S) rpns_del = re.findall("<rpndel id\=\"[0-9]+\"\/>", tags[t], re.S) # slight simplifying assumption is to take the repair with # the longest reparandum as the end category repair_type = "" longestlength = 0 for r in rpns: repairID = r[r.find("=")+2:-3] l = repair_dict[repairID] if l[1]-l[0] > longestlength: longestlength = l[1]-l[0] repair_type = "Sub" for r in rpns_del: repairID = r[r.find("=")+2:-3] l = repair_dict[repairID] if l[1]-l[0] > longestlength: longestlength = l[1]-l[0] repair_type = "Del" if repair_type == "": raise Exception("Repair not passed \ correctly."+str(words)+str(tags)) current_tag += "<rpEnd"+repair_type+"/>" current_tag = current_tag.replace("<rpMid/>", "") if current_tag == "": current_tag = "<f/>" if "uttseg" in representation: current_tag += TTO_tag if "dact" in representation: current_tag += dact_tag if "laugh" in representation: current_tag += laughter_tag new_tags.append(current_tag) return new_tags def convert_from_inc_disfluency_tags_to_eval_tags( tags, words, start=0, representation="disf1_uttseg"): """Converts the incremental style output tags of the RNN to the standard STIR eval output tags. The exact inverse of convertFromEvalTagsToIncrementalDisfluencyTags. Keyword arguments: tags -- the RNN style disfluency tags words -- the words in the utterance start -- position from where to begin changing the tags from representation -- the number corresponding to the type of tagging system, 1=standard, 2=rm-N values where N does not count intervening edit terms 3=same as 2 but with a 'c' tag after edit terms have ended. """ # maps from the repair ID to a list of # [reparandumStart,repairStart,repairOver] repair_dict = defaultdict(list) new_tags = [] if start > 0: # assuming the tags up to this point are already converted new_tags = tags[:start] if "mid" not in representation: rps_s = re.findall("<rps id\=\"[0-9]+\"\/>", tags[start-1]) rpmid = re.findall("<rp id\=\"[0-9]+\"\/>", tags[start-1]) if rps_s: for r in rps_s: repairID = r[r.find("=")+2:-3] resolved_repair = re.findall( "<rpn[repsubdl]+ id\=\"{}\"\/>" .format(repairID), tags[start-1]) if not resolved_repair: if not rpmid: rpmid = [] rpmid.append(r.replace("rps ", "rp ")) if rpmid: newstart = start-1 for rp in rpmid: rps = rp.replace("rp ", "rps ") repairID = rp[rp.find("=")+2:-3] # go back and find the repair for b in range(newstart, -1, -1): if rps in tags[b]: repair_dict[repairID] = [b, b, False] break for t in range(start, len(tags)): current_tag = "" if "uttseg" in representation: m = re.search(r'<[ct]*/>', tags[t]) if m: TTO_tag = m.group(0) if "<e/>" in tags[t] or "<i/>" in tags[t]: current_tag = "<e/>" if "<rm-" in tags[t]: rps = re.findall("<rm-[0-9]+\/>", tags[t], re.S) for r in rps: # should only be one current_tag += '<rps id="{}"/>'.format(t) # print t-dist if "simple" in representation: # simply tagging the rps pass else: dist = int(r[r.find("-")+1:-2]) repair_dict[str(t)] = [max([0, t-dist]), t, False] # backwards looking search if full set # print new_tags, t, dist, t-dist, max([0, t-dist]) # print tags[:t+1] rms_start_idx = max([0, t-dist]) new_tags[rms_start_idx] = '<rms id="{}"/>'\ .format(t) + new_tags[rms_start_idx]\ .replace("<f/>", "") reparandum = False # interregnum if edit term for b in range(t-1, max([0, t-dist]), -1): if "<e" not in new_tags[b]: reparandum = True new_tags[b] = '<rm id="{}"/>'.format(t) +\ new_tags[b].replace("<f/>", "") if reparandum is False and "<e" in new_tags[b]: new_tags[b] = '<i id="{}"/>'.\ format(t) + new_tags[b] # repair ends if "<rpEnd" in tags[t]: rpns = re.findall("<rpEndSub/>", tags[t], re.S) rpns_del = re.findall("<rpEndDel/>", tags[t], re.S) rpnAll = rpns + rpns_del if rpnAll: for k, v in repair_dict.items(): if t >= int(k) and v[2] is False: repair_dict[k][2] = True # classify the repair if rpns_del: # a delete current_tag += '<rpndel id="{}"/>'.format(k) rpns_del.pop(0) continue reparandum = [words[i] for i in range(0, len(new_tags)) if '<rms id="{}"/>'. format(k) in new_tags[i] or '<rm id="{}"/>'. format(k) in new_tags[i]] repair = [words[i] for i in range(0, len(new_tags)) if '<rps id="{}"/>'.format(k) in new_tags[i] or '<rp id="{}"/>'.format(k) in new_tags[i]] + [words[t]] if reparandum == repair: current_tag += '<rpnrep id="{}"/>'.format(k) else: current_tag += '<rpnsub id="{}"/>'.format(k) # mid repair phases still in progress for k, v in repair_dict.items(): if t > int(k) and v[2] is False: current_tag += '<rp id="{}"/>'.format(k) if current_tag == "": current_tag = "<f/>" if "uttseg" in representation: current_tag += TTO_tag new_tags.append(current_tag) return new_tags def verify_dialogue_data_matrix(dialogue_data_matrix, word_dict=None, pos_dict=None, tag_dict=None, n_lm=0, n_acoustic=0): """Boolean check of whether dialogue data consistent with args. Checks all idxs are valid and number of features is correct. Standard form of each row of the matrix should be: utt_index, word_idx, pos_idx, word_duration, acoustic_feats.., lm_feats....,label """ l = 3 + n_acoustic + n_lm + 1 # row length try: for i, row in enumerate(dialogue_data_matrix): assert len(row) == l,\ "row {} wrong length {}, should be {}".format(i, len(row), l) assert word_dict[row[1]] is not None,\ "row[1][{}] {} not in word dict".format(i, row[1]) assert pos_dict[row[2]] is not None,\ "row[2][{}] {} not in POS dict".format(i, row[2]) assert tag_dict[row[-1]] is not None,\ "row[-1][{}] {} not in tag dict".format(i, row[-1]) except AssertionError as a: print(a) return False return True def verify_dialogue_data_matrices_from_folder(matrices_folder_filepath, word_dict=None, pos_dict=None, tag_dict=None, n_lm=0, n_acoustic=0): """A boolean check that the dialogue matrices make sense for the particular configuration in args and tag2idx dicts. """ for dialogue_file in os.listdir(matrices_folder_filepath): v = np.load(matrices_folder_filepath + "/" + dialogue_file,allow_pickle=True) if not verify_dialogue_data_matrix(v, word_dict=word_dict, pos_dict=pos_dict, tag_dict=tag_dict, n_lm=n_lm, n_acoustic=n_acoustic): # print"{} failed test".format(dialogue_file) return False return True def dialogue_data_and_indices_from_matrix(d_matrix, n_extra, pre_seg=False, window_size=2, bs=9, tag_rep="disf1_uttseg", tag_to_idx_map=None, in_utterances=False): """Transforming from input format of row: utt_index, word_idx, pos_idx, word_duration, acoustic_feats.., lm_feats....,label to 5-tuple of: word_idx, pos_idx, extra, labels, indices where :word_idx: and :pos_idx: have the correct window context according to @window_size and :indices: is the start and stop points for consumption by the net in training for each label in :labels:. :extra: is the matrix of extra features. """ if len(d_matrix)==0: return utt_indices = d_matrix[:, 0] words = d_matrix[:, 1] pos = d_matrix[:, 2] extra = None if n_extra == 0 else d_matrix[:, 3: -1] labels = d_matrix[:, -1] word_idx = [] pos_idx = [] current = [] indices = [] previous_idx = -1 for i, a_tuple in enumerate(zip(utt_indices, words, pos, labels)): utt_idx, w, p, l = a_tuple # print(w) current.append((w, p, l)) if pre_seg: if previous_idx != utt_idx or i == len(labels)-1: if in_utterances: start = 0 if indices == [] else indices[-1][1]+1 indices.append([start, start + (len(current)-1)]) else: indices.extend(indices_from_length(len(current), bs, start_index=len(indices))) word_idx.extend(context_win_backwards([x[0] for x in current], window_size)) pos_idx.extend(context_win_backwards([x[1] for x in current], window_size)) current = [] # print('final') # print(w) # print(word_idx) elif i == len(labels)-1: # indices = indices_from_length(len(current), bs) # currently a simple window of same size indices = [[j, j + bs] for j in range(0, len(current))] padding = [[-1, -1]] * (bs - window_size) word_idx = padding + context_win_backwards([x[0] for x in current], window_size) pos_idx = padding + context_win_backwards([x[1] for x in current], window_size) previous_idx = utt_idx # print(pos_idx) # print(word_idx) # print(extra) # print(labels) # print(indices) # return np.asarray(word_idx, dtype=np.int32), np.asarray(pos_idx, # dtype=np.int32),\ # labels,\ # np.asarray(indices, dtype=np.int32) return np.asarray(word_idx, dtype=np.int32), np.asarray(pos_idx, dtype=np.int32),\ extra,\ labels,\ np.asarray(indices, dtype=np.int32) if __name__ == '__main__': tags = '<f/>,<rms id="3"/>,<i id="3"/><e/>,<rps id="3"/>' +\ '<rpnsub id="3"/>,<f/>,<e/>,<f/>,' + \ '<f/>' tags = tags.split(",") words = "i,like,uh,love,to,uh,love,alot".split(",") # print(tags) # print(len(tags)) # print(len(words)) new_tags = convert_from_eval_tags_to_inc_disfluency_tags( tags, words, representation="disf1") # print(new_tags) old_tags = convert_from_inc_disfluency_tags_to_eval_tags( new_tags, words, representation="disf1") assert old_tags == tags, "\n " + str(old_tags) + "\n" + str(tags) x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] # print(context_win_backwards(x, 2)) # print "indices", indices_from_length(11, 9)
[ 11748, 4738, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 340, 861, 10141, 198, 11748, 302, 198, 6738, 17268, 1330, 4277, 11600, 198, 11748, 28686, 628, 628, 198, 4299, 10385, 62, 1462, 62, 36439, 62, 18242, 7, 12985, 11, 1128, 2625,...
1.856602
14,533
from rest_framework.pagination import PageNumberPagination
[ 6738, 1334, 62, 30604, 13, 79, 363, 1883, 1330, 7873, 15057, 47, 363, 1883 ]
4.142857
14
''' @author Gabriel Flores Checks the primality of an integer. ''' def is_prime(x): ''' Checks the primality of an integer. ''' sqrt = int(x ** (1/2)) for i in range(2, sqrt, 1): if x % i == 0: return False return True if __name__ == "__main__": main()
[ 7061, 6, 198, 197, 31, 9800, 17371, 40222, 198, 197, 7376, 4657, 262, 2684, 1483, 286, 281, 18253, 13, 198, 7061, 6, 198, 198, 4299, 318, 62, 35505, 7, 87, 2599, 198, 197, 7061, 6, 198, 197, 197, 7376, 4657, 262, 2684, 1483, 286, ...
2.347826
115
#!/usr/bin/env python """Plot the performance of different variants of the string routines for one size. """ import libplot import pylab if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 43328, 262, 2854, 286, 1180, 17670, 286, 262, 4731, 31878, 198, 1640, 530, 2546, 13, 198, 37811, 198, 198, 11748, 9195, 29487, 198, 198, 11748, 279, 2645, 397, 628, 198, 36...
3.103448
58
import sqlite3 import time, datetime, random import matplotlib matplotlib.use("Agg") #added due to error, possibly due to install configuration import matplotlib.pyplot as plt print(matplotlib.get_backend()) import matplotlib.dates as mdates from matplotlib import style style.use('fivethirtyeight') conn = sqlite3.connect("part1.db") c = conn.cursor() def select_all_tasks(c): """ Query all rows in the tasks table :param conn: the Connection object :return: """ c.execute("SELECT * FROM stufftoplot") rows = c.fetchall() for row in rows: print(row) create_table() #data_entry() #data_insert(1111, "2016-01-02", "more keywords", 1) #data_insert(2222, "2016-01-03", "less keywords", 2) #dynamic_data_entry() # time.sleep(1) #select_all_tasks(c) #read_from_db() #graph_data() create_n_rows(10) del_and_update() c.close() conn.close()
[ 11748, 44161, 578, 18, 198, 11748, 640, 11, 4818, 8079, 11, 4738, 198, 11748, 2603, 29487, 8019, 198, 6759, 29487, 8019, 13, 1904, 7203, 46384, 4943, 198, 2, 29373, 2233, 284, 4049, 11, 5457, 2233, 284, 2721, 8398, 198, 198, 11748, 26...
2.553977
352
import json import os from urllib.parse import parse_qs from urllib.parse import urlparse import pytest import responses from freezegun import freeze_time from jwkest import BadSignature from jwkest.jwk import SYMKey from oic.oauth2.message import MissingSigningKey from oic.oauth2.message import WrongSigningAlgorithm from oic.oic import DEF_SIGN_ALG from oic.oic import Server from oic.oic import response_types_to_grant_types from oic.oic.consumer import IGNORE from oic.oic.consumer import Consumer from oic.oic.consumer import clean_response from oic.oic.message import AccessTokenRequest from oic.oic.message import AccessTokenResponse from oic.oic.message import AuthorizationResponse from oic.oic.message import IdToken from oic.oic.message import OpenIDSchema from oic.oic.message import ProviderConfigurationResponse from oic.oic.message import RegistrationResponse from oic.utils.authn.client import CLIENT_AUTHN_METHOD from oic.utils.keyio import KeyBundle from oic.utils.keyio import KeyJar from oic.utils.keyio import keybundle_from_local_file from oic.utils.sdb import DictSessionBackend from oic.utils.sdb import session_get from oic.utils.time_util import utc_time_sans_frac __author__ = "rohe0002" KC_SYM_VS = KeyBundle({"kty": "oct", "key": "abcdefghijklmnop", "use": "ver"}) KC_SYM_S = KeyBundle({"kty": "oct", "key": "abcdefghijklmnop", "use": "sig"}) BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "data/keys")) KC_RSA = keybundle_from_local_file( os.path.join(BASE_PATH, "rsa.key"), "rsa", ["ver", "sig"] ) SRVKEYS = KeyJar() SRVKEYS[""] = [KC_RSA] SRVKEYS["client_1"] = [KC_SYM_VS, KC_RSA] CLIKEYS = KeyJar() CLIKEYS["http://localhost:8088"] = [KC_RSA] CLIKEYS[""] = [KC_RSA, KC_SYM_VS] CLIKEYS["https://example.com"] = [KC_RSA] SERVER_INFO = { "version": "3.0", "issuer": "https://localhost:8088", "authorization_endpoint": "http://localhost:8088/authorization", "token_endpoint": "http://localhost:8088/token", "userinfo_endpoint": "http://localhost:8088/userinfo", "flows_supported": ["code", "token"], } CONFIG = { "authz_page": "authz", "scope": ["openid"], "response_type": "code", "request_method": "parameter", "password": "hemligt", "max_age": 3600, "user_info": {"name": None}, }
[ 11748, 33918, 198, 11748, 28686, 198, 6738, 2956, 297, 571, 13, 29572, 1330, 21136, 62, 48382, 198, 6738, 2956, 297, 571, 13, 29572, 1330, 19016, 29572, 198, 198, 11748, 12972, 9288, 198, 11748, 9109, 198, 6738, 1479, 89, 1533, 403, 133...
2.60339
885
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import io from codecs import open from setuptools import setup, find_packages REQUIRES = [ 'appdirs>=1.4.3,<2.0', 'carto>=1.6.0,<2.0', 'jinja2>=2.10.1,<3.0', 'pandas>=0.24.2<1.0', 'shapely>=1.6.4,<2.0', 'tqdm>=4.32.1,<5.0', 'unidecode>=1.1.0,<2.0', 'webcolors>=1.9.1,<2.0' ] PACKAGE_DATA = { '': [ 'LICENSE', 'CONTRIBUTORS', ], 'cartoframes': [ 'assets/*', 'assets/*.j2' ] + walk_subpkg('assets'), } here = os.path.abspath(os.path.dirname(__file__)) with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = '\n' + f.read() about = {} with open(os.path.join(here, 'cartoframes', '__version__.py'), 'r', 'utf-8') as f: exec(f.read(), about) setup( name=about['__title__'], version=about['__version__'], description=about['__description__'], long_description=long_description, url=about['__url__'], author=about['__author__'], author_email=about['__email__'], license=about['__license__'], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7' ], keywords='carto data science maps spatial pandas', packages=find_packages(), install_requires=REQUIRES, python_requires=">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", include_package_data=True, package_dir={'cartoframes': 'cartoframes'}, package_data=PACKAGE_DATA, )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 28686, 198, 11748, 33245, 198, 6738, 40481, 82, 1330, 1280, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 1...
2.185919
909
#!/usr/bin/env python # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. """Run BioSQL tests using MySQL.""" import unittest # Really do want "import *" to get all the test clases: from common_BioSQL import * # noqa: F403 from common_BioSQL_online import * # noqa: F403 # Import these explicitly to avoid flake8 F405 below: from common_BioSQL import load_biosql_ini, check_config from common_BioSQL_online import share_config import requires_internet requires_internet.check() DBDRIVER = "mysql.connector" DBTYPE = "mysql" DBHOST, DBUSER, DBPASSWD, TESTDB = load_biosql_ini(DBTYPE) # This will abort if driver not installed etc: check_config(DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB) share_config(DBDRIVER, DBTYPE, DBHOST, DBUSER, DBPASSWD, TESTDB) if __name__ == "__main__": # Run the test cases runner = unittest.TextTestRunner(verbosity=2) unittest.main(testRunner=runner)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 770, 2438, 318, 636, 286, 262, 8436, 404, 7535, 6082, 290, 21825, 416, 663, 198, 2, 5964, 13, 220, 4222, 766, 262, 38559, 24290, 2393, 326, 815, 423, 587, 3017, 198, 2, 355, 636...
3.008798
341
# Generated by Django 2.2.6 on 2020-04-05 07:50 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 17, 13, 21, 319, 12131, 12, 3023, 12, 2713, 8753, 25, 1120, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
# Copyright (C) 2015 UCSC Computational Genomics Lab # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from abc import ABCMeta, abstractmethod from contextlib import contextmanager import re try: import cPickle except ImportError: import pickle as cPickle ########################################## #The following methods deal with creating/loading/updating/writing/checking for the #existence of jobs ########################################## def jobs(self): """ Returns iterator on the jobs in the store. :rtype : iterator """ raise NotImplementedError( ) ########################################## #The following provide an way of creating/reading/writing/updating files #associated with a given job. ########################################## ########################################## #The following methods deal with shared files, i.e. files not associated #with specific jobs. ########################################## sharedFileNameRegex = re.compile( r'^[a-zA-Z0-9._-]+$' ) # FIXME: Rename to updateSharedFileStream ## Helper methods for subclasses def _defaultTryCount( self ): return int( self.config.retryCount+1 )
[ 2, 15069, 357, 34, 8, 1853, 14417, 6173, 22476, 864, 5215, 31994, 3498, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, ...
3.32
550
from datetime import date import dash import dash_bootstrap_components as dbc import dash_core_components as dcc import dash_html_components as html import pandas as pd import plotly.express as px from dash.dependencies import Input, Output test_data = pd.read_csv("data/world_data.csv") today = date.today() external_stylesheets = [dbc.themes.BOOTSTRAP] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) app.title = "COVID Dashboard - UK Edition" app.layout = html.Div([ html.Nav(className="navbar navbar-dark fixed-top bg-dark flex-md-nowrap p-0 shadow", children=[ html.A(className="navbar-brand col-sm-3 col-md-2 mr-0", children="COVID-19"), # dcc.DatePickerRange(className="date-and-location", # id="month-picker", # min_date_allowed=date(2020, 1, 30), # max_date_allowed=date(today.year, today.month, today.day), # start_date=date(2020, 3, 1), # end_date=date(today.year, today.month, today.day), # style={"height": "50%"} # ), ]), html.Div(className="container-fluid", children=[ html.Div(className="row", children=[ html.Nav(className="col-md-2 d-none d-md-block bg-light sidebar", children=[ html.Div(className="sidebar-sticky", children=[ html.H6(className="sidebar-heading d-flex px-3 mt-4 mb-1 text-muted", children=[ html.Span("Custom Search"), ]), html.Ul(className="nav flex-column", children=[ html.Li(className="nav-item", children=[ dcc.Link("User Search", href="/home"), ])]), html.H6(className="sidebar-heading d-flex px-3 mt-4 mb-1 text-muted", children=[ html.Span("Preset Search"), ]), dcc.Location(id="url", refresh=False), html.Ul(className="nav flex-column", children=[ html.Li(className="nav-item", children=[ dcc.Link("Africa", href="/africa"), html.Span(className="sr-only"), ]), html.Li(className="nav-item", children=[ dcc.Link("Asia", href="/asia"), html.Span(className="sr-only"), ]), html.Li(className="nav-item", children=[ dcc.Link("Europe", href="/europe"), html.Span(className="sr-only"), ]), html.Li(className="nav-item", children=[ dcc.Link("North America", href="/northamerica"), html.Span(className="sr-only"), ]), html.Li(className="nav-item", children=[ dcc.Link("South America", href="/southamerica"), html.Span(className="sr-only"), ]), html.Li(className="nav-item", children=[ dcc.Link("Oceania", href="/oceania"), html.Span(className="sr-only"), ]), ]), html.Div(id='page-content'), html.Ul(className="nav flex-column mb-2") ]), ]), html.Main(role="main", className="col-md-9 ml-sm-auto col-lg-10 px-4", children=[ html.Div(className="chartjs-size-monitor", style={"position": "absolute", "left": "0px", "top": "0px", "right": "0px", "bottom": "0px", "overflow": "hidden", "pointer-events": "none", "visibility": "hidden", "z-index": "-1"}), html.Div(className="box-shadow", children=[ ]), dbc.Row( [ dbc.Col(children=[ html.H1(children="Deaths"), html.Hr(className="lead"), html.Div(id="death-stats", children="######"), ]), dbc.Col(children=[ html.H1(children="Cases"), html.Hr(className="lead"), html.Div(id="cases-stats", children="######"), ]), dbc.Col(children=[ html.H1(children="Vaccines"), html.Hr(className="lead"), html.Div(id="vaccines-stats", children="######"), ]), ] ), html.Div(className="graphs", children=[ dcc.Graph( id="cases-graph" ), dcc.Graph( id="deaths-graph", ), ]), ])])])]) if __name__ == "__main__": app.run_server(debug=True, dev_tools_ui=False)
[ 6738, 4818, 8079, 1330, 3128, 198, 11748, 14470, 198, 11748, 14470, 62, 18769, 26418, 62, 5589, 3906, 355, 288, 15630, 198, 11748, 14470, 62, 7295, 62, 5589, 3906, 355, 288, 535, 198, 11748, 14470, 62, 6494, 62, 5589, 3906, 355, 27711, ...
1.49442
4,391
from collections import OrderedDict import skimage.io as io from config import get_config config = get_config() _cache = LRUCache(config["data_queue_len"])
[ 6738, 17268, 1330, 14230, 1068, 35, 713, 198, 198, 11748, 1341, 9060, 13, 952, 355, 33245, 198, 198, 6738, 4566, 1330, 651, 62, 11250, 198, 198, 11250, 796, 651, 62, 11250, 3419, 628, 198, 198, 62, 23870, 796, 37491, 9598, 4891, 7, ...
3.134615
52
import numpy from heapq import * import time
[ 11748, 299, 32152, 198, 6738, 24575, 80, 1330, 1635, 198, 11748, 640, 628, 198 ]
3.357143
14
import logging import time from qupy.framing.slip import Slip from qupy.interface.serial import SerialPort from qupy.interface.errors import InterfaceTimeoutError, InterfaceIOError, InterfaceError from qupy.comm.client import CommClient logging.basicConfig(level=logging.DEBUG) if __name__ == '__main__': s = SerialPort() f = Slip() c = CommClient(s, f) connect = True while True: if connect: try: s.open() except InterfaceIOError as e: time.sleep(1.0) continue c.start() connect = False try: print('ask...') data = input() d = c.ask(data.encode('utf-8')) print('data:',d) if len(d) > 0 and d[0] == ord('p'): break except InterfaceIOError as e: print('ask io error', str(e)) c.stop() s.close() connect = True except InterfaceTimeoutError as e: print('timeout') c.stop() s.close()
[ 11748, 18931, 198, 11748, 640, 198, 198, 6738, 627, 9078, 13, 19298, 278, 13, 6649, 541, 1330, 49988, 198, 6738, 627, 9078, 13, 39994, 13, 46911, 1330, 23283, 13924, 198, 6738, 627, 9078, 13, 39994, 13, 48277, 1330, 26491, 48031, 12331,...
1.949477
574
import torch from transformers import PreTrainedTokenizerFast from transformers import BartForConditionalGeneration abs_summary = AbsSummarization()
[ 11748, 28034, 198, 6738, 6121, 364, 1330, 3771, 2898, 1328, 30642, 7509, 22968, 198, 6738, 6121, 364, 1330, 13167, 1890, 25559, 1859, 8645, 341, 198, 198, 8937, 62, 49736, 796, 13051, 13065, 3876, 1634, 3419 ]
4.257143
35
# -*- coding: utf-8 -*- import tempfile from dp_tornado.engine.helper import Helper as dpHelper
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 628, 198, 11748, 20218, 7753, 198, 198, 6738, 288, 79, 62, 45910, 4533, 13, 18392, 13, 2978, 525, 1330, 5053, 525, 355, 288, 79, 47429, 628 ]
2.564103
39
import requests from bs4 import BeautifulSoup import json import re # Range of Roll Number - User Input start_roll = int(input("Starting Roll Number: ")) end_roll = int(input("Ending Roll Number: ")) # Semester - User Input sem = int(input("Which Semester[1-8]: ")) # Verbosity verbose = int(input("Verbosity Level (1 for just data, 2 for detailed data): ")) # Roll Number Tuple roll_tuple = tuple(range(start_roll, end_roll+1)) # Getting the Websites result_url = 'https://makaut.ucanapply.com/smartexam/public/result-details' get_result_details = 'https://makaut.ucanapply.com/smartexam/public//get-result-details' # Semester Codes semcode = ('SM01', 'SM02', 'SM03', 'SM04', 'SM05', 'SM06', 'SM07', 'SM08') if verbose == 1: # Disply most recent for roll in roll_tuple: data = get_marks_of(roll, sem) try: print(f"({data['name']}, {data['sgpa_odd' if sem%2!=0 else 'sgpa_even']})") except: pass elif verbose == 2: for roll in roll_tuple: print_marks_properly(roll, sem) else: print("[!] Verbosity Level Wrong!")
[ 11748, 7007, 201, 198, 6738, 275, 82, 19, 1330, 23762, 50, 10486, 201, 198, 11748, 33918, 201, 198, 11748, 302, 201, 198, 201, 198, 2, 13667, 286, 8299, 7913, 532, 11787, 23412, 201, 198, 9688, 62, 2487, 796, 493, 7, 15414, 7203, 22...
2.389006
473
#!/usr/bin/env python __author__ = "Adeel Ahmad" __email__ = "adeelahmad14@hotmail.com" __status__ = "Production" import matplotlib.pyplot as plt import numpy as np import skimage as ski import Image def cartoonify(im, display=False): """ function receives an image and add its gradient magnitude in it and add it to the original image to return a semi-cartoon image. Note: You will have to scale the gradient-magnitue image before adding it back to the input image. Input: im: input image to cartoonify display: whether to display image or not... NOTE: This function expects a gaussian filtered image """ kernel, kern_size = np.array([[-1,-1,-1] ,[0,0,0] ,[1,1,1]]), 3 gx, gy = np.zeros_like(im, dtype=float), np.zeros_like(im, dtype=float) for i in range(im.shape[0] - (kern_size-1)): for j in range(im.shape[1] - (kern_size-1)): window = im[i:i + kern_size, j:j + kern_size] gx[i,j], gy[i,j] = np.sum(window * kernel.T), np.sum(window * kernel) magnitude = np.sqrt(gx**2 + gy**2) magnitude = magnitude.astype(np.int64, copy=False) cartoon = im + (im + magnitude) if display == 1: plt.imshow(cartoon, cmap='gray') plt.suptitle('Cartoon') plt.show() return cartoon
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 834, 9800, 834, 796, 366, 2782, 68, 417, 24152, 1, 198, 834, 12888, 834, 796, 366, 671, 417, 993, 9937, 1415, 31, 8940, 4529, 13, 785, 1, 198, 834, 13376, 834, 796, 366, 35027...
2.125
664
from dataclasses import dataclass import json import re
[ 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 11748, 33918, 198, 11748, 302, 198 ]
3.733333
15
# -*- coding: utf-8 -*- """Demo37_PythonforDataScience.ipynb # PYTHON FOR DATA SCIENCE We will take our python programming skills a step further and process large data in it. Python is an excellent language for deployment. Hence we will be using open source data during the learning process!! This will make sure we understand the challenges a Data Scientist can face and how to deal with them. In my experience, Data Preprocessing takes 70% of the time in any project. Hence it is crucial for any Data Scientist to know what it is and how it is done. This may be the boring portion of the course but I assure you, you will feel accomplished by the end of this tutorial. - Python Basics - Object Oriented Python - **Python for Data Science** - NumPy - Pandas - Plotting - Matplotlib - Seaborn Let's get coding !! """ #Variables can not start with a number 12var = 1 _13var = 1 name = "Mahnoor" surname = "Anjum" age = 21 print("I'm {} {} and I am {} years old.".format(name, surname, age)) name = "Mahnoor" surname = "Anjum" age = 21 print("I'm {_1} {_2} and I am {_3} years old.".format(_1 = name, _2= surname, _3 = age)) """### INDEXING AND SLICING One of the most important Python concept for data scientists is the slicing operator ':' """ str = "ONE TWO THREE FOUR FIVE" print(str[0]) print(str[5]) print(str[len(str)-1]) str[:5] str[5:] str[1]="a" nested = [1,2,3,['_1','_2','_3',['__1']]] nested[0] nested[3][0] len(nested) len(nested[3]) nested[3][3] nested[3][3][0] dict = {'key1':'value1', \ 'key2': 'value2', \ 'key3':'value3'} dict['key1'] T = True F = False var = 10 for i in range(var): print(i) for i in range(var): bool = (i==2) if bool: break print(i) [1,2,3,1,1,2,3,4] (1,2,3,1,1,2,3,4) {1,2,3,1,1,2,3,4} new_set = set([1,2,3,1,1,2,3,4]) new_set.add(5) new_set for item in new_set: print(item) list(range(4)) my_list = list(range(5,10)) output = [] for number in my_list: output.append(number**3) output output = [num**3 for num in my_list] output """### FUNCTIONS""" my_function("Jalebi (Hungry okay?)") my_function() num = 4 change(num) num num = 4 change(num) num num = [4] change(num) num my_list """### LAMBDA EXPRESSIONS""" list(map(square, my_list)) list(map(lambda x:x*x, my_list)) """### BUILT-IN FUNCTIONS""" s = "We have a hulk !!!" s.lower() s.upper() s.split() dict = {'key1':1,'key2':2} dict.keys() dict.values() dict.items() my_list.pop() my_list """### TUPLE UNPACKING""" list_of_tuples =[(1,2),(3,4),(5,6)] for (a,b) in list_of_tuples: print (a) print (b) """### WELCOME TO THE END OF THE TUTORIAL You made it!! Hope you enjoyed taking this tutorial as much as I enjoyed coding it. From the next tutorial, we will be starting our first Data Science Library called NumPy. Until then, happy coding. --------------------------------------------------------------------------------- Copyrights 2018, All Rights Reserved. - Author: Mahnoor Anjum. - Course: The Complete Hands-On Machine Learning Course - Date Created: 2018-06-27 - Date Modified: - """
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 11522, 78, 2718, 62, 37906, 1640, 6601, 26959, 13, 541, 2047, 65, 198, 198, 2, 350, 56, 4221, 1340, 7473, 42865, 6374, 42589, 198, 1135, 481, 1011, 674, 21015, ...
2.558848
1,215
# Copyright 2017 QuantRocket - All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from quantrocket.houston import houston from quantrocket.cli.utils.output import json_to_cli def list_databases(service=None): """ List databases. Parameters ---------- service : str, optional only list databases for this service Returns ------- list list of databases """ params = {} if service: params["service"] = service response = houston.get("/db/databases", params=params) houston.raise_for_status_with_json(response) return response.json() def download_database(database, outfile): """ Download a database from the db service and write to a local file. Parameters ---------- database : str, required the filename of the database (as returned by the list_databases) outfile: str, required filename to write the database to Returns ------- None """ response = houston.get("/db/databases/{0}".format(database), stream=True) houston.raise_for_status_with_json(response) with open(outfile, "wb") as f: for chunk in response.iter_content(chunk_size=1024): if chunk: f.write(chunk) def s3_push_databases(service, codes=None): """ Push database(s) to Amazon S3. Parameters ---------- serivce : str, required only push databases for this service (specify 'all' to push all services) codes: list of str, optional only push databases identified by these codes (omit to push all databases for service) Returns ------- json status message """ data = {} if codes: data["codes"] = codes response = houston.put("/db/s3/{0}".format(service), data=data) houston.raise_for_status_with_json(response) return response.json() def s3_pull_databases(service, codes=None, force=False): """ Pull database(s) from Amazon S3 to the db service. Parameters ---------- serivce : str, required only pull databases for this service (specify 'all' to pull all services) codes: list of str, optional only pull databases identified by these codes (omit to pull all databases for service) force: bool overwrite existing database if one exists (default is to fail if one exists) Returns ------- json status message """ params = {} if codes: params["codes"] = codes if force: params["force"] = force response = houston.get("/db/s3/{0}".format(service), params=params) houston.raise_for_status_with_json(response) return response.json() def optimize_databases(service, codes=None): """ Optimize database file(s) to improve performance. Parameters ---------- serivce : str, required only optimize databases for this service (specify 'all' to optimize all services) codes: list of str, optional only optimize databases identified by these codes (omit to optimize all databases for service) Returns ------- json status message """ data = {} if codes: data["codes"] = codes response = houston.post("/db/optimizations/{0}".format(service), data=data) houston.raise_for_status_with_json(response) return response.json()
[ 2, 15069, 2177, 16972, 50218, 532, 1439, 6923, 33876, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13,...
2.867701
1,353
from ink2canvas.lib.simpletransform import parseTransform
[ 6738, 16882, 17, 5171, 11017, 13, 8019, 13, 36439, 35636, 1330, 21136, 41762 ]
4.384615
13
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Kay management script. :Copyright: (c) 2009 Accense Technology, Inc. All rights reserved. :license: BSD, see LICENSE for more details. """ import sys import os import logging sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path import kay kay.setup_env(manage_py_env=True) from werkzeug import script from kay.management import * import appengine_config if __name__ == '__main__': if len(sys.argv) == 1: sys.argv.append("--help") script.run()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 198, 37247, 4542, 4226, 13, 198, 198, 25, 15269, 25, 357, 66, 8, 3717, 6366, 1072, 8987, 11, 3457, 13, ...
2.631841
201
from pymicro.crystal.microstructure import * from pymicro.crystal.texture import * from pymicro.examples import PYMICRO_EXAMPLES_DATA_DIR from matplotlib import pyplot as plt, colors, colorbar, cm import pathlib as pl '''This example demonstrate how a field can be used to color each symbol on the pole figure with the :py:meth:~`pymicro.crystal.texture.set_map_field` method. ''' #orientations = Orientation.read_euler_txt('../data/orientation_set.inp') #for i in range(600): # micro.grains.append(Grain(i, orientations[i + 1])) euler_list = np.genfromtxt(PYMICRO_EXAMPLES_DATA_DIR / 'orientation_set.inp').tolist() micro = Microstructure(name='field', autodelete=True) micro.add_grains(euler_list) # load strain from dat files strain_field = np.genfromtxt(PYMICRO_EXAMPLES_DATA_DIR / 'strain_avg_per_grain.dat')[19, ::2] # build custom pole figures pf = PoleFigure(microstructure=micro) pf.mksize = 40 pf.set_map_field('strain', strain_field, field_min_level=0.015, field_max_level=0.025) fig = plt.figure() # direct PF ax1 = fig.add_axes([0.05, 0.05, 0.8, 0.9], aspect='equal') pf.plot_pf(ax=ax1) plt.title('111 pole figure, cubic elasticity') # to add the color bar ax2 = fig.add_axes([0.8, 0.05, 0.05, 0.9]) norm = colors.Normalize(vmin=0.015, vmax=0.025) cb = colorbar.ColorbarBase(ax2, cmap=cm.hot, norm=norm, orientation='vertical') cb.set_label('Average strain (mm/mm)') image_name = os.path.splitext(__file__)[0] + '.png' print('writing %s' % image_name) plt.savefig('%s' % image_name, format='png') del pf del micro from matplotlib import image image.thumbnail(image_name, 'thumb_' + image_name, 0.2)
[ 6738, 279, 4948, 2500, 13, 20470, 7757, 13, 24055, 301, 5620, 1330, 1635, 198, 6738, 279, 4948, 2500, 13, 20470, 7757, 13, 41293, 1330, 1635, 198, 6738, 279, 4948, 2500, 13, 1069, 12629, 1330, 350, 56, 49884, 13252, 62, 6369, 2390, 64...
2.529595
642
import time import sys import os import numpy as np import torch import torch.nn as nn import torchvision.models as models from torch.nn.utils.rnn import pack_padded_sequence from model.base_torch import BaseModel from model.utils.general import init_dir, get_logger from model.utils.general import Progbar from model.utils.general import Config from model.utils.general import minibatches from model.components.SimpleCNN import SimpleCNN from model.components.ResNet import ResNet9 from model.components.DenseNet import DenseNet169 from model.components.seq2seq_torch import EncoderCNN, DecoderWithAttention, Img2Seq from model.evaluation.text import score_files, truncate_end, write_answers from model.utils.image import pad_batch_images_2 from model.utils.text import pad_batch_formulas from torch.utils.data import Dataset import h5py import json from model.utils.data_generator import DataGenerator
[ 11748, 640, 201, 198, 11748, 25064, 201, 198, 11748, 28686, 201, 198, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748, 28034, 201, 198, 11748, 28034, 13, 20471, 355, 299, 77, 201, 198, 11748, 28034, 10178, 13, 27530, 355, 4981,...
3.154362
298
#!/usr/bin/python # Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file # for details. All rights reserved. Use of this source code is governed by a # BSD-style license that can be found in the LICENSE file. """This entry point runs all script tests.""" import logging.config import unittest if __name__ == '__main__': logging.config.fileConfig('logging.conf') suite = unittest.TestLoader().loadTestsFromNames([ 'templateloader_test', 'pegparser_test', 'idlparser_test', 'idlnode_test', 'idlrenderer_test', 'database_test', 'databasebuilder_test', 'emitter_test', 'dartgenerator_test', 'multiemitter_test' ]) unittest.TextTestRunner().run(suite)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 15069, 357, 66, 8, 2813, 11, 262, 29032, 1628, 7035, 13, 220, 4222, 766, 262, 37195, 20673, 2393, 198, 2, 329, 3307, 13, 1439, 2489, 10395, 13, 5765, 286, 428, 2723, 2438, 318, 21825, ...
2.853175
252
""" 11367. Report Card Time : xCrypt0r : Python 3 : 29,380 KB : 64 ms : 2020 9 18 """ if __name__ == '__main__': main()
[ 37811, 198, 1157, 27824, 13, 6358, 5172, 3862, 198, 198, 25, 2124, 23919, 15, 81, 198, 25, 11361, 513, 198, 1058, 2808, 11, 23734, 14204, 198, 1058, 5598, 13845, 198, 1058, 12131, 860, 1248, 198, 37811, 198, 198, 361, 11593, 3672, 834...
2.363636
55
from __future__ import print_function, division, absolute_import import copy import numpy as np import skimage.draw import skimage.measure from .. import imgaug as ia from .utils import normalize_shape, project_coords # TODO functions: square(), to_aspect_ratio(), contains_point() # TODO add tests for ndarray inputs def project(self, from_shape, to_shape): """Project the bounding box onto a differently shaped image. E.g. if the bounding box is on its original image at ``x1=(10 of 100 pixels)`` and ``y1=(20 of 100 pixels)`` and is projected onto a new image with size ``(width=200, height=200)``, its new position will be ``(x1=20, y1=40)``. (Analogous for ``x2``/``y2``.) This is intended for cases where the original image is resized. It cannot be used for more complex changes (e.g. padding, cropping). Parameters ---------- from_shape : tuple of int or ndarray Shape of the original image. (Before resize.) to_shape : tuple of int or ndarray Shape of the new image. (After resize.) Returns ------- imgaug.augmentables.bbs.BoundingBox ``BoundingBox`` instance with new coordinates. """ coords_proj = project_coords([(self.x1, self.y1), (self.x2, self.y2)], from_shape, to_shape) return self.copy( x1=coords_proj[0][0], y1=coords_proj[0][1], x2=coords_proj[1][0], y2=coords_proj[1][1], label=self.label) def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0): """Extend the size of the bounding box along its sides. Parameters ---------- all_sides : number, optional Value by which to extend the bounding box size along all sides. top : number, optional Value by which to extend the bounding box size along its top side. right : number, optional Value by which to extend the bounding box size along its right side. bottom : number, optional Value by which to extend the bounding box size along its bottom side. left : number, optional Value by which to extend the bounding box size along its left side. Returns ------- imgaug.BoundingBox Extended bounding box. """ return BoundingBox( x1=self.x1 - all_sides - left, x2=self.x2 + all_sides + right, y1=self.y1 - all_sides - top, y2=self.y2 + all_sides + bottom ) def intersection(self, other, default=None): """Compute the intersection BB between this BB and another BB. Note that in extreme cases, the intersection can be a single point. In that case the intersection bounding box exists and it will be returned, but it will have a height and width of zero. Parameters ---------- other : imgaug.augmentables.bbs.BoundingBox Other bounding box with which to generate the intersection. default : any, optional Default value to return if there is no intersection. Returns ------- imgaug.augmentables.bbs.BoundingBox or any Intersection bounding box of the two bounding boxes if there is an intersection. If there is no intersection, the default value will be returned, which can by anything. """ x1_i = max(self.x1, other.x1) y1_i = max(self.y1, other.y1) x2_i = min(self.x2, other.x2) y2_i = min(self.y2, other.y2) if x1_i > x2_i or y1_i > y2_i: return default else: return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i) def union(self, other): """Compute the union BB between this BB and another BB. This is equivalent to drawing a bounding box around all corner points of both bounding boxes. Parameters ---------- other : imgaug.augmentables.bbs.BoundingBox Other bounding box with which to generate the union. Returns ------- imgaug.augmentables.bbs.BoundingBox Union bounding box of the two bounding boxes. """ return BoundingBox( x1=min(self.x1, other.x1), y1=min(self.y1, other.y1), x2=max(self.x2, other.x2), y2=max(self.y2, other.y2), ) def iou(self, other): """Compute the IoU between this bounding box and another one. IoU is the intersection over union, defined as:: ``area(intersection(A, B)) / area(union(A, B))`` ``= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))`` Parameters ---------- other : imgaug.augmentables.bbs.BoundingBox Other bounding box with which to compare. Returns ------- float IoU between the two bounding boxes. """ inters = self.intersection(other) if inters is None: return 0.0 area_union = self.area + other.area - inters.area return inters.area / area_union if area_union > 0 else 0.0 def is_fully_within_image(self, image): """Estimate whether the bounding box is fully inside the image area. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use. If an ``ndarray``, its shape will be used. If a ``tuple``, it is assumed to represent the image shape and must contain at least two integers. Returns ------- bool ``True`` if the bounding box is fully inside the image area. ``False`` otherwise. """ shape = normalize_shape(image) height, width = shape[0:2] return ( self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height) def is_partly_within_image(self, image): """Estimate whether the BB is at least partially inside the image area. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use. If an ``ndarray``, its shape will be used. If a ``tuple``, it is assumed to represent the image shape and must contain at least two integers. Returns ------- bool ``True`` if the bounding box is at least partially inside the image area. ``False`` otherwise. """ shape = normalize_shape(image) height, width = shape[0:2] eps = np.finfo(np.float32).eps img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps) return self.intersection(img_bb) is not None def is_out_of_image(self, image, fully=True, partly=False): """Estimate whether the BB is partially/fully outside of the image area. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use. If an ``ndarray``, its shape will be used. If a ``tuple``, it is assumed to represent the image shape and must contain at least two integers. fully : bool, optional Whether to return ``True`` if the bounding box is fully outside of the image area. partly : bool, optional Whether to return ``True`` if the bounding box is at least partially outside fo the image area. Returns ------- bool ``True`` if the bounding box is partially/fully outside of the image area, depending on defined parameters. ``False`` otherwise. """ if self.is_fully_within_image(image): return False elif self.is_partly_within_image(image): return partly return fully def clip_out_of_image(self, image): """Clip off all parts of the BB box that are outside of the image. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use for the clipping of the bounding box. If an ``ndarray``, its shape will be used. If a ``tuple``, it is assumed to represent the image shape and must contain at least two integers. Returns ------- imgaug.augmentables.bbs.BoundingBox Bounding box, clipped to fall within the image dimensions. """ shape = normalize_shape(image) height, width = shape[0:2] assert height > 0, ( "Expected image with height>0, got shape %s." % (image.shape,)) assert width > 0, ( "Expected image with width>0, got shape %s." % (image.shape,)) eps = np.finfo(np.float32).eps x1 = np.clip(self.x1, 0, width - eps) x2 = np.clip(self.x2, 0, width - eps) y1 = np.clip(self.y1, 0, height - eps) y2 = np.clip(self.y2, 0, height - eps) return self.copy( x1=x1, y1=y1, x2=x2, y2=y2, label=self.label ) # TODO convert this to x/y params? def shift(self, top=None, right=None, bottom=None, left=None): """Move this bounding box along the x/y-axis. Parameters ---------- top : None or int, optional Amount of pixels by which to shift this object *from* the top (towards the bottom). right : None or int, optional Amount of pixels by which to shift this object *from* the right (towards the left). bottom : None or int, optional Amount of pixels by which to shift this object *from* the bottom (towards the top). left : None or int, optional Amount of pixels by which to shift this object *from* the left (towards the right). Returns ------- imgaug.augmentables.bbs.BoundingBox Shifted bounding box. """ top = top if top is not None else 0 right = right if right is not None else 0 bottom = bottom if bottom is not None else 0 left = left if left is not None else 0 return self.copy( x1=self.x1+left-right, x2=self.x2+left-right, y1=self.y1+top-bottom, y2=self.y2+top-bottom ) # TODO add explicit test for zero-sized BBs (worked when tested by hand) def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1, copy=True, raise_if_out_of_image=False, thickness=None): """Draw the bounding box on an image. Parameters ---------- image : (H,W,C) ndarray The image onto which to draw the bounding box. Currently expected to be ``uint8``. color : iterable of int, optional The color to use, corresponding to the channel layout of the image. Usually RGB. alpha : float, optional The transparency of the drawn bounding box, where ``1.0`` denotes no transparency and ``0.0`` is invisible. size : int, optional The thickness of the bounding box in pixels. If the value is larger than ``1``, then additional pixels will be added around the bounding box (i.e. extension towards the outside). copy : bool, optional Whether to copy the input image or change it in-place. raise_if_out_of_image : bool, optional Whether to raise an error if the bounding box is fully outside of the image. If set to ``False``, no error will be raised and only the parts inside the image will be drawn. thickness : None or int, optional Deprecated. Returns ------- (H,W,C) ndarray(uint8) Image with bounding box drawn on it. """ if thickness is not None: ia.warn_deprecated( "Usage of argument 'thickness' in BoundingBox.draw_on_image() " "is deprecated. The argument was renamed to 'size'.") size = thickness if raise_if_out_of_image and self.is_out_of_image(image): raise Exception( "Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f " "on image with shape %s." % ( self.x1, self.y1, self.x2, self.y2, image.shape)) result = np.copy(image) if copy else image if isinstance(color, (tuple, list)): color = np.uint8(color) for i in range(size): y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int # When y values get into the range (H-0.5, H), the *_int functions # round them to H. That is technically sensible, but in the case # of drawing means that the border lies just barely outside of # the image, making the border disappear, even though the BB is # fully inside the image. Here we correct for that because of # beauty reasons. Same is the case for x coordinates. if self.is_fully_within_image(image): y1 = np.clip(y1, 0, image.shape[0]-1) y2 = np.clip(y2, 0, image.shape[0]-1) x1 = np.clip(x1, 0, image.shape[1]-1) x2 = np.clip(x2, 0, image.shape[1]-1) y = [y1-i, y1-i, y2+i, y2+i] x = [x1-i, x2+i, x2+i, x1-i] rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape) if alpha >= 0.99: result[rr, cc, :] = color else: if ia.is_float_array(result): # TODO use blend_alpha here result[rr, cc, :] = ( (1 - alpha) * result[rr, cc, :] + alpha * color) result = np.clip(result, 0, 255) else: input_dtype = result.dtype result = result.astype(np.float32) result[rr, cc, :] = ( (1 - alpha) * result[rr, cc, :] + alpha * color) result = np.clip(result, 0, 255).astype(input_dtype) return result # TODO add tests for pad and pad_max def extract_from_image(self, image, pad=True, pad_max=None, prevent_zero_size=True): """Extract the image pixels within the bounding box. This function will zero-pad the image if the bounding box is partially/fully outside of the image. Parameters ---------- image : (H,W) ndarray or (H,W,C) ndarray The image from which to extract the pixels within the bounding box. pad : bool, optional Whether to zero-pad the image if the object is partially/fully outside of it. pad_max : None or int, optional The maximum number of pixels that may be zero-paded on any side, i.e. if this has value ``N`` the total maximum of added pixels is ``4*N``. This option exists to prevent extremely large images as a result of single points being moved very far away during augmentation. prevent_zero_size : bool, optional Whether to prevent the height or width of the extracted image from becoming zero. If this is set to ``True`` and the height or width of the bounding box is below ``1``, the height/width will be increased to ``1``. This can be useful to prevent problems, e.g. with image saving or plotting. If it is set to ``False``, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or ``W`` potentially being 0. Returns ------- (H',W') ndarray or (H',W',C) ndarray Pixels within the bounding box. Zero-padded if the bounding box is partially/fully outside of the image. If `prevent_zero_size` is activated, it is guarantueed that ``H'>0`` and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``. """ pad_top = 0 pad_right = 0 pad_bottom = 0 pad_left = 0 height, width = image.shape[0], image.shape[1] x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int # When y values get into the range (H-0.5, H), the *_int functions # round them to H. That is technically sensible, but in the case of # extraction leads to a black border, which is both ugly and # unexpected after calling cut_out_of_image(). Here we correct for # that because of beauty reasons. Same is the case for x coordinates. fully_within = self.is_fully_within_image(image) if fully_within: y1, y2 = np.clip([y1, y2], 0, height-1) x1, x2 = np.clip([x1, x2], 0, width-1) # TODO add test if prevent_zero_size: if abs(x2 - x1) < 1: x2 = x1 + 1 if abs(y2 - y1) < 1: y2 = y1 + 1 if pad: # if the bb is outside of the image area, the following pads the # image first with black pixels until the bb is inside the image # and only then extracts the image area # TODO probably more efficient to initialize an array of zeros # and copy only the portions of the bb into that array that # are natively inside the image area if x1 < 0: pad_left = abs(x1) x2 = x2 + pad_left width = width + pad_left x1 = 0 if y1 < 0: pad_top = abs(y1) y2 = y2 + pad_top height = height + pad_top y1 = 0 if x2 >= width: pad_right = x2 - width if y2 >= height: pad_bottom = y2 - height paddings = [pad_top, pad_right, pad_bottom, pad_left] any_padded = any([val > 0 for val in paddings]) if any_padded: if pad_max is None: pad_max = max(paddings) image = ia.pad( image, top=min(pad_top, pad_max), right=min(pad_right, pad_max), bottom=min(pad_bottom, pad_max), left=min(pad_left, pad_max) ) return image[y1:y2, x1:x2] else: within_image = ( (0, 0, 0, 0) <= (x1, y1, x2, y2) < (width, height, width, height) ) out_height, out_width = (y2 - y1), (x2 - x1) nonzero_height = (out_height > 0) nonzero_width = (out_width > 0) if within_image and nonzero_height and nonzero_width: return image[y1:y2, x1:x2] if prevent_zero_size: out_height = 1 out_width = 1 else: out_height = 0 out_width = 0 if image.ndim == 2: return np.zeros((out_height, out_width), dtype=image.dtype) return np.zeros((out_height, out_width, image.shape[-1]), dtype=image.dtype) # TODO also add to_heatmap # TODO add this to BoundingBoxesOnImage def to_keypoints(self): """Convert the BB's corners to keypoints (clockwise, from top left). Returns ------- list of imgaug.augmentables.kps.Keypoint Corners of the bounding box as keypoints. """ # TODO get rid of this deferred import from imgaug.augmentables.kps import Keypoint return [ Keypoint(x=self.x1, y=self.y1), Keypoint(x=self.x2, y=self.y1), Keypoint(x=self.x2, y=self.y2), Keypoint(x=self.x1, y=self.y2) ] def coords_almost_equals(self, other, max_distance=1e-4): """Estimate if this and another BB have almost identical coordinates. Parameters ---------- other : imgaug.augmentables.bbs.BoundingBox or iterable The other bounding box with which to compare this one. If this is an ``iterable``, it is assumed to represent the top-left and bottom-right coordinates of that bounding box, given as e.g. an ``(2,2)`` ndarray or an ``(4,)`` ndarray or as a similar list. max_distance : number, optional The maximum euclidean distance between a corner on one bounding box and the closest corner on the other bounding box. If the distance is exceeded for any such pair, the two BBs are not viewed as equal. Returns ------- bool Whether the two bounding boxes have almost identical corner coordinates. """ if ia.is_np_array(other): # we use flat here in case other is (N,2) instead of (4,) coords_b = other.flat elif ia.is_iterable(other): coords_b = list(ia.flatten(other)) else: assert isinstance(other, BoundingBox), ( "Expected 'other' to be an iterable containing two " "(x,y)-coordinate pairs or a BoundingBox. " "Got type %s." % (type(other),)) coords_b = other.coords.flat coords_a = self.coords return np.allclose(coords_a.flat, coords_b, atol=max_distance, rtol=0) def almost_equals(self, other, max_distance=1e-4): """Compare this and another BB's label and coordinates. This is the same as :func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals` but additionally compares the labels. Parameters ---------- other : imgaug.augmentables.bbs.BoundingBox or iterable The other object to compare against. Expected to be a ``BoundingBox``. max_distance : number, optional See :func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals`. Returns ------- bool ``True`` if the coordinates are almost equal and additionally the labels are equal. Otherwise ``False``. """ if self.label != other.label: return False return self.coords_almost_equals(other, max_distance=max_distance) def copy(self, x1=None, y1=None, x2=None, y2=None, label=None): """Create a shallow copy of this BoundingBox instance. Parameters ---------- x1 : None or number If not ``None``, then the ``x1`` coordinate of the copied object will be set to this value. y1 : None or number If not ``None``, then the ``y1`` coordinate of the copied object will be set to this value. x2 : None or number If not ``None``, then the ``x2`` coordinate of the copied object will be set to this value. y2 : None or number If not ``None``, then the ``y2`` coordinate of the copied object will be set to this value. label : None or string If not ``None``, then the ``label`` of the copied object will be set to this value. Returns ------- imgaug.augmentables.bbs.BoundingBox Shallow copy. """ return BoundingBox( x1=self.x1 if x1 is None else x1, x2=self.x2 if x2 is None else x2, y1=self.y1 if y1 is None else y1, y2=self.y2 if y2 is None else y2, label=copy.deepcopy(self.label) if label is None else label ) def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None): """ Create a deep copy of the BoundingBox object. Parameters ---------- x1 : None or number If not ``None``, then the ``x1`` coordinate of the copied object will be set to this value. y1 : None or number If not ``None``, then the ``y1`` coordinate of the copied object will be set to this value. x2 : None or number If not ``None``, then the ``x2`` coordinate of the copied object will be set to this value. y2 : None or number If not ``None``, then the ``y2`` coordinate of the copied object will be set to this value. label : None or string If not ``None``, then the ``label`` of the copied object will be set to this value. Returns ------- imgaug.augmentables.bbs.BoundingBox Deep copy. """ # TODO write specific copy routine with deepcopy for label and remove # the deepcopy from copy() return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label) class BoundingBoxesOnImage(object): """Container for the list of all bounding boxes on a single image. Parameters ---------- bounding_boxes : list of imgaug.augmentables.bbs.BoundingBox List of bounding boxes on the image. shape : tuple of int or ndarray The shape of the image on which the objects are placed. Either an image with shape ``(H,W,[C])`` or a ``tuple`` denoting such an image shape. Examples -------- >>> import numpy as np >>> from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage >>> >>> image = np.zeros((100, 100)) >>> bbs = [ >>> BoundingBox(x1=10, y1=20, x2=20, y2=30), >>> BoundingBox(x1=25, y1=50, x2=30, y2=70) >>> ] >>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape) """ # TODO remove this? here it is image height, but in BoundingBox it is # bounding box height # TODO remove this? here it is image width, but in BoundingBox it is # bounding box width def on(self, image): """Project bounding boxes from one image (shape) to a another one. Parameters ---------- image : ndarray or tuple of int New image onto which the bounding boxes are to be projected. May also simply be that new image's shape tuple. Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Object containing the same bounding boxes after projection to the new image shape. """ shape = normalize_shape(image) if shape[0:2] == self.shape[0:2]: return self.deepcopy() bounding_boxes = [bb.project(self.shape, shape) for bb in self.bounding_boxes] return BoundingBoxesOnImage(bounding_boxes, shape) def to_xyxy_array(self, dtype=np.float32): """Convert the ``BoundingBoxesOnImage`` object to an ``(N,4) ndarray``. This is the inverse of :func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`. Parameters ---------- dtype : numpy.dtype, optional Desired output datatype of the ndarray. Returns ------- ndarray ``(N,4) ndarray``, where ``N`` denotes the number of bounding boxes and ``4`` denotes the top-left and bottom-right bounding box corner coordinates in form ``(x1, y1, x2, y2)``. """ xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=np.float32) for i, box in enumerate(self.bounding_boxes): xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2] return xyxy_array.astype(dtype) def to_xy_array(self): """Convert the ``BoundingBoxesOnImage`` object to an ``(N,2) ndarray``. Returns ------- ndarray ``(2*B,2) ndarray`` of xy-coordinates, where ``B`` denotes the number of bounding boxes. """ return self.to_xyxy_array().reshape((-1, 2)) def fill_from_xyxy_array_(self, xyxy): """Modify the BB coordinates of this instance in-place. .. note :: This currently expects exactly one entry in `xyxy` per bounding in this instance. (I.e. two corner coordinates per instance.) Otherwise, an ``AssertionError`` will be raised. .. note :: This method will automatically flip x-coordinates if ``x1>x2`` for a bounding box. (Analogous for y-coordinates.) Parameters ---------- xyxy : (N, 4) ndarray or iterable of iterable of number Coordinates of ``N`` bounding boxes on an image, given as a ``(N,4)`` array of two corner xy-coordinates per bounding box. ``N`` must match the number of bounding boxes in this instance. Returns ------- BoundingBoxesOnImage This instance itself, with updated bounding box coordinates. Note that the instance was modified in-place. """ xyxy = np.array(xyxy, dtype=np.float32) # note that np.array([]) is (0,), not (0, 4) assert xyxy.shape[0] == 0 or (xyxy.ndim == 2 and xyxy.shape[-1] == 4), ( "Expected input array to have shape (N,4), " "got shape %s." % (xyxy.shape,)) assert len(xyxy) == len(self.bounding_boxes), ( "Expected to receive an array with as many rows there are " "bounding boxes in this instance. Got %d rows, expected %d." % ( len(xyxy), len(self.bounding_boxes))) for bb, (x1, y1, x2, y2) in zip(self.bounding_boxes, xyxy): bb.x1 = min([x1, x2]) bb.y1 = min([y1, y2]) bb.x2 = max([x1, x2]) bb.y2 = max([y1, y2]) return self def fill_from_xy_array_(self, xy): """Modify the BB coordinates of this instance in-place. See :func:`imgaug.augmentables.bbs.BoundingBoxesOnImage.fill_from_xyxy_array_`. Parameters ---------- xy : (2*B, 2) ndarray or iterable of iterable of number Coordinates of ``B`` bounding boxes on an image, given as a ``(2*B,2)`` array of two corner xy-coordinates per bounding box. ``B`` must match the number of bounding boxes in this instance. Returns ------- BoundingBoxesOnImage This instance itself, with updated bounding box coordinates. Note that the instance was modified in-place. """ xy = np.array(xy, dtype=np.float32) return self.fill_from_xyxy_array_(xy.reshape((-1, 4))) def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1, copy=True, raise_if_out_of_image=False, thickness=None): """Draw all bounding boxes onto a given image. Parameters ---------- image : (H,W,3) ndarray The image onto which to draw the bounding boxes. This image should usually have the same shape as set in ``BoundingBoxesOnImage.shape``. color : int or list of int or tuple of int or (3,) ndarray, optional The RGB color of all bounding boxes. If a single ``int`` ``C``, then that is equivalent to ``(C,C,C)``. alpha : float, optional Alpha/transparency of the bounding box. size : int, optional Thickness in pixels. copy : bool, optional Whether to copy the image before drawing the bounding boxes. raise_if_out_of_image : bool, optional Whether to raise an exception if any bounding box is outside of the image. thickness : None or int, optional Deprecated. Returns ------- (H,W,3) ndarray Image with drawn bounding boxes. """ image = np.copy(image) if copy else image for bb in self.bounding_boxes: image = bb.draw_on_image( image, color=color, alpha=alpha, size=size, copy=False, raise_if_out_of_image=raise_if_out_of_image, thickness=thickness ) return image def remove_out_of_image(self, fully=True, partly=False): """Remove all BBs that are fully/partially outside of the image. Parameters ---------- fully : bool, optional Whether to remove bounding boxes that are fully outside of the image. partly : bool, optional Whether to remove bounding boxes that are partially outside of the image. Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Reduced set of bounding boxes, with those that were fully/partially outside of the image being removed. """ bbs_clean = [ bb for bb in self.bounding_boxes if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)] return BoundingBoxesOnImage(bbs_clean, shape=self.shape) def clip_out_of_image(self): """Clip off all parts from all BBs that are outside of the image. Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Bounding boxes, clipped to fall within the image dimensions. """ bbs_cut = [ bb.clip_out_of_image(self.shape) for bb in self.bounding_boxes if bb.is_partly_within_image(self.shape)] return BoundingBoxesOnImage(bbs_cut, shape=self.shape) def shift(self, top=None, right=None, bottom=None, left=None): """Move all all BBs along the x/y-axis. Parameters ---------- top : None or int, optional Amount of pixels by which to shift all objects *from* the top (towards the bottom). right : None or int, optional Amount of pixels by which to shift all objects *from* the right (towads the left). bottom : None or int, optional Amount of pixels by which to shift all objects *from* the bottom (towards the top). left : None or int, optional Amount of pixels by which to shift all objects *from* the left (towards the right). Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Shifted bounding boxes. """ bbs_new = [ bb.shift(top=top, right=right, bottom=bottom, left=left) for bb in self.bounding_boxes] return BoundingBoxesOnImage(bbs_new, shape=self.shape) def to_keypoints_on_image(self): """Convert the bounding boxes to one ``KeypointsOnImage`` instance. Returns ------- imgaug.augmentables.kps.KeypointsOnImage A keypoints instance containing ``N*4`` coordinates for ``N`` bounding boxes. Order matches the order in ``bounding_boxes``. """ from .kps import KeypointsOnImage # This currently uses 4 points instead of 2 points as the method # is primarily used during augmentation and 4 points are overall # the better choice there. arr = np.zeros((len(self.bounding_boxes), 2*4), dtype=np.float32) for i, box in enumerate(self.bounding_boxes): arr[i] = [ box.x1, box.y1, box.x2, box.y1, box.x2, box.y2, box.x1, box.y2 ] return KeypointsOnImage.from_xy_array( arr.reshape((-1, 2)), shape=self.shape ) def invert_to_keypoints_on_image_(self, kpsoi): """Invert the output of ``to_keypoints_on_image()`` in-place. This function writes in-place into this ``BoundingBoxesOnImage`` instance. Parameters ---------- kpsoi : imgaug.augmentables.kps.KeypointsOnImages Keypoints to convert back to bounding boxes, i.e. the outputs of ``to_keypoints_on_image()``. Returns ------- BoundingBoxesOnImage Bounding boxes container with updated coordinates. Note that the instance is also updated in-place. """ assert len(kpsoi.keypoints) == len(self.bounding_boxes) * 4, ( "Expected %d coordinates, got %d." % ( len(self.bounding_boxes) * 2, len(kpsoi.keypoints))) for i, bb in enumerate(self.bounding_boxes): xx = [kpsoi.keypoints[4*i+0].x, kpsoi.keypoints[4*i+1].x, kpsoi.keypoints[4*i+2].x, kpsoi.keypoints[4*i+3].x] yy = [kpsoi.keypoints[4*i+0].y, kpsoi.keypoints[4*i+1].y, kpsoi.keypoints[4*i+2].y, kpsoi.keypoints[4*i+3].y] bb.x1 = min(xx) bb.y1 = min(yy) bb.x2 = max(xx) bb.y2 = max(yy) self.shape = kpsoi.shape return self def copy(self): """Create a shallow copy of the ``BoundingBoxesOnImage`` instance. Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Shallow copy. """ return copy.copy(self) def deepcopy(self): """Create a deep copy of the ``BoundingBoxesOnImage`` object. Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Deep copy. """ # Manual copy is far faster than deepcopy for BoundingBoxesOnImage, # so use manual copy here too bbs = [bb.deepcopy() for bb in self.bounding_boxes] return BoundingBoxesOnImage(bbs, tuple(self.shape))
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 11, 7297, 11, 4112, 62, 11748, 198, 198, 11748, 4866, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 1341, 9060, 13, 19334, 198, 11748, 1341, 9060, 13, 1326, 5015, 198, 198, 6738, 1...
2.138771
17,936
#!/usr/bin/env python from twisted.internet import endpoints from twisted.internet import protocol from twisted.internet import defer from twisted.mail import imap4 from scanner_relay.pipeline import Pipeline from scanner_relay.authentication import PassStoreFetcher, PlainPasswordFetcher import logging # Global configuration for the logging. Note that we set the level to # INFO so that only DEBUG logging does not get to stdout. FORMAT = '[%(levelname)s] (%(name)s) %(message)s' logging.basicConfig(level=logging.INFO, format=FORMAT) logger = logging.getLogger('run') # TODO(breakds): And a more graceful (singal handling) way to terminate the program. def clean_up(unused): from twisted.internet import reactor reactor.stop() print('All workd done!') if __name__ == '__main__': # FIXME: Make these configurable hostname = 'mail.breakds.org' username = 'bds@breakds.org'.encode('ascii') pass_store_entry = 'mail.breakds.org/bds' port = 143 from twisted.internet import reactor endpoint = endpoints.HostnameEndpoint(reactor, hostname, port) factory = ScannerRelayProtocolFactory( username, PassStoreFetcher(pass_store_entry), clean_up) endpoint.connect(factory) reactor.run()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 6738, 19074, 13, 37675, 1330, 886, 13033, 198, 6738, 19074, 13, 37675, 1330, 8435, 198, 6738, 19074, 13, 37675, 1330, 29135, 198, 6738, 19074, 13, 4529, 1330, 545, 499, 19, 198, 1...
3.05122
410
""" Created by akiselev on 2019-06-14 There is a horizontal row of cubes. The length of each cube is given. You need to create a new vertical pile of cubes. The new pile should follow these directions: if is on top of then . When stacking the cubes, you can only pick up either the leftmost or the rightmost cube each time. Print "Yes" if it is possible to stack the cubes. Otherwise, print "No". Do not print the quotation marks. Input Format The first line contains a single integer , the number of test cases. For each test case, there are lines. The first line of each test case contains , the number of cubes. The second line contains space separated integers, denoting the sideLengths of each cube in that order. Constraints Output Format For each test case, output a single line containing either "Yes" or "No" without the quotes. Sample Input 2 6 4 3 2 1 3 4 3 1 3 2 Sample Output Yes No """ for T in range(int(input())): n = int(input()) cubes_h = list(map(int, input().split())) i = 0 while i < n - 1 and cubes_h[i] >= cubes_h[i+1]: i += 1 while i < n - 1 and cubes_h[i] <= cubes_h[i+1]: i += 1 print("Yes" if i == n - 1 else "No")
[ 37811, 628, 15622, 416, 47594, 786, 2768, 319, 13130, 12, 3312, 12, 1415, 198, 220, 198, 1318, 318, 257, 16021, 5752, 286, 34896, 13, 383, 4129, 286, 1123, 23441, 318, 1813, 13, 921, 761, 284, 2251, 257, 649, 11723, 14540, 286, 34896,...
3.105943
387
# # # File: flask_web_py3.py # # # import os import json import redis import urllib import flask from flask import Flask from flask import render_template,jsonify from flask_httpauth import HTTPDigestAuth from flask import request, session, url_for from redis_support_py3.graph_query_support_py3 import Query_Support from redis_support_py3.construct_data_handlers_py3 import Generate_Handlers from web_core.load_static_pages_py3 import Load_Static_Files from web_core.load_redis_access_py3 import Load_Redis_Access from redis_support_py3.construct_data_handlers_py3 import Redis_RPC_Client from bootstrap_web_system_control_py3 import PI_Web_System_Control from bootstrap_web_monitoring_py3 import PI_Web_Monitor_Server from bootstrap_mqtt_client_py3 import PI_MQTT_Client_Monitor from bootstrap_eto_py3 import ETO_Management from file_server_library.file_server_lib_py3 import Construct_RPC_Library from bootstrap_irrigation_scheduling_py3 import Irrigation_Scheduling from irrigation_control.load_irrigation_control_py3 import Load_Irrigation_Control if __name__ == "__main__": file_handle = open("/data/redis_server.json",'r') data = file_handle.read() file_handle.close() redis_site_data = json.loads(data) pi_web_server = PI_Web_Server_Core(__name__, redis_site_data ) pi_web_server.generate_menu_page() pi_web_server.generate_site_map() pi_web_server.generate_default_index_page() port = pi_web_server.result["port"] pi_web_server.port = port debug = pi_web_server.result["debug"] pi_web_server.debug = debug https_flag = pi_web_server.result["https"] if https_flag == False: pi_web_server.run_https() else: pi_web_server.run_https()
[ 2, 201, 198, 2, 201, 198, 2, 220, 9220, 25, 42903, 62, 12384, 62, 9078, 18, 13, 9078, 201, 198, 2, 201, 198, 2, 201, 198, 2, 201, 198, 11748, 28686, 201, 198, 11748, 33918, 201, 198, 11748, 2266, 271, 201, 198, 11748, 2956, 297,...
2.434211
760
import os import logging from typing import Optional import click from git_talk.lib.changelog import generate_changelog from git_talk.lib.changelog.presenter import MarkdownPresenter from git_talk.lib.changelog.repository import GitRepository # @click.command() # @click.option( # "-r", # "--repo", # type=click.Path(exists=True), # default=".", # help="Path to the repository's root directory [Default: .]", # ) # @click.option("-t", "--title", default="Changelog", help="The changelog's title [Default: Changelog]") # @click.option("-d", "--description", help="Your project's description") # @click.option( # "-o", # "--output", # type=click.File("w"), # default="CHANGELOG.md", # help="The place to save the generated changelog [Default: CHANGELOG.md]", # ) # @click.option("-r", "--remote", default="origin", help="Specify git remote to use for links") # @click.option("-v", "--latest-version", type=str, help="use specified version as latest release") # @click.option("-u", "--unreleased", is_flag=True, default=False, help="Include section for unreleased changes") # @click.option("--diff-url", default=None, help="override url for compares, use {current} and {previous} for tags") # @click.option("--issue-url", default=None, help="Override url for issues, use {id} for issue id") # @click.option( # "--issue-pattern", # default=r"(#([\w-]+))", # help="Override regex pattern for issues in commit messages. Should contain two groups, original match and ID used " # "by issue-url.", # ) # @click.option( # "--tag-pattern", # default=None, # help="override regex pattern for release tags. " # "By default use semver tag names semantic. " # "tag should be contain in one group named 'version'.", # ) # @click.option("--tag-prefix", default="", help='prefix used in version tags, default: "" ') # @click.option("--stdout", is_flag=True) # @click.option("--tag-pattern", default=None, help="Override regex pattern for release tags") # @click.option("--starting-commit", help="Starting commit to use for changelog generation", default="") # @click.option("--stopping-commit", help="Stopping commit to use for changelog generation", default="HEAD") # @click.option( # "--debug", is_flag=True, help="set logging level to DEBUG", # ) if __name__ == "__main__": main()
[ 201, 198, 201, 198, 201, 198, 11748, 28686, 201, 198, 11748, 18931, 201, 198, 6738, 19720, 1330, 32233, 201, 198, 201, 198, 11748, 3904, 201, 198, 201, 198, 6738, 17606, 62, 16620, 13, 8019, 13, 354, 8368, 519, 1330, 7716, 62, 354, ...
2.793064
865
# Copyright (c) Stanford University # # This source code is patent protected and being made available under the # terms explained in the ../LICENSE-Academic and ../LICENSE-GOV files. # Author: Mario J Srouji # Email: msrouji@stanford.edu import copy import sys sys.path.append("../FormatParsers/") sys.path.append("../Interface/") import format_parser as P import module_interface as I
[ 2, 15069, 357, 66, 8, 13863, 2059, 198, 2, 198, 2, 770, 2723, 2438, 318, 12701, 6861, 290, 852, 925, 1695, 739, 262, 198, 2, 2846, 4893, 287, 262, 11485, 14, 43, 2149, 24290, 12, 12832, 49113, 290, 11485, 14, 43, 2149, 24290, 12, ...
3.254098
122
import tensorflow as tf def MultiBoxLoss(num_class=2, neg_pos_ratio=3): """multi-box loss""" return multi_box_loss
[ 11748, 11192, 273, 11125, 355, 48700, 628, 198, 198, 4299, 15237, 14253, 43, 793, 7, 22510, 62, 4871, 28, 17, 11, 2469, 62, 1930, 62, 10366, 952, 28, 18, 2599, 198, 220, 220, 220, 37227, 41684, 12, 3524, 2994, 37811, 628, 220, 220, ...
2.490196
51
import unittest import random from time import sleep import os from bingmaps import * # TODO: enter your key for testing api_key = '' if __name__ == '__main__': unittest.main()
[ 11748, 555, 715, 395, 198, 11748, 4738, 198, 6738, 640, 1330, 3993, 198, 11748, 28686, 198, 198, 6738, 275, 278, 31803, 1330, 1635, 198, 198, 2, 16926, 46, 25, 3802, 534, 1994, 329, 4856, 198, 15042, 62, 2539, 796, 10148, 198, 220, ...
2.728571
70
from cds.CloudflareWrapper import suggest_set_up, cf_config_filename from .FirewallWrapper import FirewallWrapper import logging as log
[ 6738, 269, 9310, 13, 18839, 2704, 533, 36918, 2848, 1330, 1950, 62, 2617, 62, 929, 11, 30218, 62, 11250, 62, 34345, 198, 6738, 764, 13543, 11930, 36918, 2848, 1330, 3764, 11930, 36918, 2848, 198, 11748, 18931, 355, 2604, 628, 198 ]
3.45
40
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2019-10-24 16:07 from __future__ import unicode_literals from django.db import migrations, models
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 1157, 319, 13130, 12, 940, 12, 1731, 1467, 25, 2998, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738...
2.8
55
# -*- coding: utf-8 -*- # cython: language_level=3 # Copyright (c) 2020 Nekokatt # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """Events that fire when channels are modified. This does not include message events, nor reaction events. """ from __future__ import annotations __all__: typing.List[str] = [ "ChannelEvent", "GuildChannelEvent", "DMChannelEvent", "ChannelCreateEvent", "GuildChannelCreateEvent", "ChannelUpdateEvent", "GuildChannelUpdateEvent", "ChannelDeleteEvent", "GuildChannelDeleteEvent", "PinsUpdateEvent", "GuildPinsUpdateEvent", "DMPinsUpdateEvent", "InviteCreateEvent", "InviteDeleteEvent", "WebhookUpdateEvent", ] import abc import typing import attr from hikari import channels from hikari import intents from hikari import traits from hikari.events import base_events from hikari.events import shard_events from hikari.internal import attr_extensions if typing.TYPE_CHECKING: import datetime from hikari import guilds from hikari import invites from hikari import messages from hikari import snowflakes from hikari import webhooks from hikari.api import shard as gateway_shard # TODO: find out what private message intents are needed. # TODO: This is not documented as having an intent, is this right? The guild version requires GUILDS intent.
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 3075, 400, 261, 25, 3303, 62, 5715, 28, 18, 198, 2, 15069, 357, 66, 8, 12131, 37167, 482, 1078, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 387...
3.322222
720
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tests the coordinator module """ from fabric.api import env from mock import patch from prestoadmin import coordinator from prestoadmin.util.exception import ConfigurationError from tests.base_test_case import BaseTestCase
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13...
3.808612
209
# Saul Castro # Hiralben Hirpara # config file format import random # list to string with sep 'char' # string to list, count # compare words, return 0 for no match, # 1 if end of a == start of b # 2 if end of b == start of a if __name__ == '__main__': readInput()
[ 2, 31603, 21193, 198, 2, 367, 21093, 11722, 29379, 1845, 64, 628, 198, 2, 4566, 2393, 5794, 198, 11748, 4738, 220, 198, 198, 2, 1351, 284, 4731, 351, 41767, 705, 10641, 6, 628, 198, 2, 4731, 284, 1351, 11, 954, 198, 198, 2, 8996, ...
2.936842
95
from global_utils import * # target word TARGET_WORD = 'right' if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--wavfile', help='Path to the .wav files', required=False ) parser.add_argument( '--indir', help='Absolute path to data directory containing .wav files', required=False ) args = parser.parse_args() main(args)
[ 6738, 3298, 62, 26791, 220, 1330, 1635, 198, 198, 2, 2496, 1573, 198, 51, 46095, 62, 54, 12532, 796, 705, 3506, 6, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 30751, 796, 1822, 29572,...
2.378378
185
# Copyright 2019 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from .claim import Claim from .narrative import Node from .author import Authority
[ 2, 15069, 13130, 16972, 499, 844, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, ...
4.266667
180
import pygame, sys, random, time from pygame.locals import * # Create a Scoreboard class (from scratch) # Instance variables: screen, x, y, score, and font (size 30) # Methods: draw (and __init__) # Create a scoreboard at location 5, 5 # Draw the scoreboard in the game loop def main(): pygame.init() clock = pygame.time.Clock() pygame.display.set_caption("SPACE INVADERS!") screen = pygame.display.set_mode((640, 650)) enemy_rows = 3 enemy = EnemyFleet(screen, enemy_rows) fighter = Fighter(screen, 320, 590) scoreboard = Scoreboard(screen) gameover_image = pygame.image.load("gameover.png") is_game_over = False while True: clock.tick(60) for event in pygame.event.get(): pressed_keys = pygame.key.get_pressed() if event.type == KEYDOWN and pressed_keys[K_SPACE]: fighter.fire() if event.type == QUIT: sys.exit() screen.fill((0, 0, 0)) pressed_keys = pygame.key.get_pressed() if pressed_keys[K_LEFT] and fighter.x > -50: fighter.x = fighter.x - 5 if pressed_keys[K_RIGHT] and fighter.x < 590: fighter.x = fighter.x + 5 fighter.draw() enemy.move() enemy.draw() for missile in fighter.missiles: missile.move() missile.draw() for badguy in enemy.badguys: for missile in fighter.missiles: if badguy.hit_by(missile): scoreboard.score = scoreboard.score + 100 badguy.dead = True missile.exploded = True fighter.remove_exploded_missiles() enemy.remove_dead_badguys() if enemy.is_defeated: enemy_rows = enemy_rows + 1 enemy = EnemyFleet(screen, enemy_rows) scoreboard.draw() if not is_game_over: pygame.display.update() for badguy in enemy.badguys: if badguy.y > 545: screen.blit(gameover_image, (170, 200)) pygame.display.update() is_game_over = True main()
[ 11748, 12972, 6057, 11, 25064, 11, 4738, 11, 640, 198, 6738, 12972, 6057, 13, 17946, 874, 1330, 1635, 628, 628, 198, 198, 2, 13610, 257, 15178, 3526, 1398, 357, 6738, 12692, 8, 198, 2, 220, 220, 2262, 590, 9633, 25, 3159, 11, 2124, ...
2.098266
1,038
from guniflask.config import settings from guniflask.web import blueprint, get_route
[ 6738, 2485, 361, 75, 2093, 13, 11250, 1330, 6460, 198, 6738, 2485, 361, 75, 2093, 13, 12384, 1330, 30881, 11, 651, 62, 38629, 628 ]
3.583333
24
from ._base_trainer import _BaseTrainer, MeasureMemory import pathlib import torch.multiprocessing as mp import torch from torch import nn import horovod.torch as hvd import numpy as np import xarray as xr import itertools from .flow_dataset import FlowDataset from .unet import UNet import sys from .visualization import save_flows from .converter import save_as_netcdf
[ 6738, 47540, 8692, 62, 2213, 10613, 1330, 4808, 14881, 2898, 10613, 11, 24291, 30871, 198, 11748, 3108, 8019, 198, 11748, 28034, 13, 16680, 541, 305, 919, 278, 355, 29034, 198, 11748, 28034, 198, 6738, 28034, 1330, 299, 77, 198, 11748, ...
3.226087
115
from spinup import vpg import tensorflow as tf import numpy as np from gym.spaces import Box, Discrete from envs.focal_point_task_us_env import FocalPointTaskUsEnv from envs.phantom import ( ScatterersPhantom, Ball, Teddy ) from envs.imaging import ImagingSystem, Probe from envs.generator import ConstPhantomGenerator, RandomProbeGenerator import envs.logger import matplotlib import argparse N_STEPS_PER_EPISODE = 16 N_STEPS_PER_EPOCH = 64 EPOCHS = 251 # NO_EPISODES = (NSTEPS_PER_EPOCH/NSTEPS_PER_EPISODE)*EPOCHS N_WORKERS = 4 AC_KWARGS = dict( hidden_sizes=[16, 32], activation=tf.nn.relu ) # Below functions base on openai.spinup's A-C scheme implementation. if __name__ == "__main__": main()
[ 6738, 7906, 929, 1330, 410, 6024, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 11550, 13, 2777, 2114, 1330, 8315, 11, 8444, 8374, 198, 6738, 551, 14259, 13, 69, 4374, 62, 4122, 62, 35943, 6...
2.590106
283
import sys,os sys.path.append('/home/zongdaoming/cv/multi-organ/multi-organ-ijcai') from lib.losses.BaseClass import _AbstractDiceLoss from lib.losses.basic import *
[ 11748, 25064, 11, 418, 198, 17597, 13, 6978, 13, 33295, 10786, 14, 11195, 14, 89, 506, 6814, 3383, 14, 33967, 14, 41684, 12, 9971, 14, 41684, 12, 9971, 12, 2926, 66, 1872, 11537, 198, 6738, 9195, 13, 22462, 274, 13, 14881, 9487, 133...
2.741935
62
# encoding: utf-8 # # Copyright (c) 2019 Dean Jackson <deanishe@deanishe.net> # # MIT Licence. See http://opensource.org/licenses/MIT # # Created on 2019-09-06 # """Overlay check mark on icons.""" from __future__ import print_function, absolute_import from Cocoa import ( NSBitmapImageRep, NSPNGFileType, NSImage, NSMakeSize, NSCompositeCopy, NSSizeToCGSize, NSZeroPoint, ) from CoreGraphics import CGRectZero def overlay(src, overlay, dest): """Create image ``dest`` by putting ``overlay`` on top of ``src``. Args: src (str): Path to source image. overlay (str): Path to overlay image. dest (str): Path to save combined image to. """ src = NSImage.alloc().initWithContentsOfFile_(src) overlay = NSImage.alloc().initWithContentsOfFile_(overlay) img = NSImage.alloc().initWithSize_(src.size()) img.lockFocus() rect = (0, 0), src.size() src.drawInRect_(rect) overlay.drawInRect_(rect) img.unlockFocus() rep = NSBitmapImageRep.imageRepWithData_(img.TIFFRepresentation()) data = rep.representationUsingType_properties_(NSPNGFileType,{}) data.writeToFile_atomically_(dest, False)
[ 2, 21004, 25, 3384, 69, 12, 23, 198, 2, 198, 2, 15069, 357, 66, 8, 13130, 11325, 6612, 1279, 2934, 272, 271, 258, 31, 2934, 272, 271, 258, 13, 3262, 29, 198, 2, 198, 2, 17168, 10483, 594, 13, 4091, 2638, 1378, 44813, 1668, 13, ...
2.638581
451
import datetime from threading import Thread from time import sleep import DBC.dbcreate as dbc
[ 11748, 4818, 8079, 198, 6738, 4704, 278, 1330, 14122, 198, 6738, 640, 1330, 3993, 198, 198, 11748, 360, 2749, 13, 9945, 17953, 355, 288, 15630, 198 ]
3.692308
26
# Copyright 2016 Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import google.api_core.gapic_v1.method import mock TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] SQL_QUERY = """\ SELECT first_name, last_name, age FROM citizens ORDER BY age""" SQL_QUERY_WITH_PARAM = """ SELECT first_name, last_name, email FROM citizens WHERE age <= @max_age""" PARAMS = {"max_age": 30} PARAM_TYPES = {"max_age": "INT64"} SQL_QUERY_WITH_BYTES_PARAM = """\ SELECT image_name FROM images WHERE @bytes IN image_data""" PARAMS_WITH_BYTES = {"bytes": b"FACEDACE"} RESUME_TOKEN = b"DEADBEEF" TXN_ID = b"DEAFBEAD" SECONDS = 3 MICROS = 123456
[ 2, 15069, 1584, 3012, 11419, 1439, 2489, 10395, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, ...
3.081633
392
A = ['a','b'] B = ['c','b','a'] print compareHash(A, B)
[ 32, 796, 37250, 64, 41707, 65, 20520, 198, 33, 796, 37250, 66, 41707, 65, 41707, 64, 20520, 198, 198, 4798, 8996, 26257, 7, 32, 11, 347, 8, 198 ]
2.035714
28
# Importing required packages: import pandas as pd from tkinter import * from tkinter.ttk import * root = Tk() # To visualize input DataFrame: def generate_plot(gui_root, df, x_axis, y_axis=None, plot={'type':None, 'hue':None}, aesthetics={'style':'whitegrid', 'palette':'hsv', 'size':(10,7), 'dpi':100}): """ DESCRIPTION: Reads input Pandas DataFrame and returns a plot based on selected parameters. PARAMETERS: > gui_root : [Required] Accepts Tkinter application base class (Tk) initialized variable/instance. > df : [Required] Accepts Pandas DataFrame. """ # Importing external dependencies: import matplotlib.pyplot as plt from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from matplotlib.figure import Figure import seaborn as sns sns.set(style=aesthetics['style'], palette=aesthetics['palette']) import warnings warnings.filterwarnings('ignore') # Defining Tableau colors: tableau_20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120), (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150), (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148), (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)] # Scaling over RGB values to [0,1] range (Matplotlib acceptable format): for i in range(len(tableau_20)): r,g,b = tableau_20[i] tableau_20[i] = (r/255., g/255., b/255.) # Setting up Tkinter Frame: lf = Labelframe(gui_root) lf.grid(row=0, column=0, sticky='nwes', padx=3, pady=3) # Setting up Canvas backed by Matplotlib: fig = Figure(figsize=aesthetics['size'], dpi=aesthetics['dpi']) ax = fig.add_subplot(111) # Drawing various plots with Seaborn: if plot['type']=='lineplot': # Lineplot g = sns.lineplot(x=x_axis, y=y_axis, data=df, ax=ax) elif plot['type']=='regplot': # Regplot g = sns.regplot(x=x_axis, y=y_axis, data=df, color=tableau_20[16], ax=ax) elif plot['type']=='distplot': # Distplot g = sns.distplot(a=df[x_axis].dropna(), color=tableau_20[7], hist_kws=dict(edgecolor='k', linewidth=0.5), ax=ax) elif plot['type']=='barplot': # Grouped Barplot g = sns.catplot(x=x_axis, y=y_axis, hue=plot['hue'], data=df, kind="bar", palette='rocket', ax=ax) g.despine(left=True) else: # More to be added later pass # Displaying plot on Canvas: canvas = FigureCanvasTkAgg(fig, master=lf) canvas.draw() canvas.get_tk_widget().grid(row=0, column=0) generate_plot() root.mainloop()
[ 2, 17267, 278, 2672, 10392, 25, 201, 198, 11748, 19798, 292, 355, 279, 67, 201, 198, 6738, 256, 74, 3849, 1330, 1635, 201, 198, 6738, 256, 74, 3849, 13, 926, 74, 1330, 1635, 201, 198, 201, 198, 15763, 796, 309, 74, 3419, 201, 198,...
2.125915
1,366
import os import torch from torch.utils.data import DataLoader, TensorDataset import requests import io import zipfile from .data_reader import read_vocabulary, read_lm_data, lm_data_producer from .pre_process_wikitext import pre_process def get_dataset(dataset, batch_size, device): """ Returns data iterator for each set and vocabulary """ download_dataset(dataset) # downloads and preprocess dataset if needed if dataset == "wiki-02": data_files = [".data/wikitext-2/wikitext-2/wiki.train.tokens.sents", ".data/wikitext-2/wikitext-2/wiki.valid.tokens.sents", ".data/wikitext-2/wikitext-2/wiki.test.tokens.sents"] vocab_size = 33278 + 1 # add 1 to account for PAD if dataset == 'ptb': data_files = [".data/penn-treebank/ptb.train.txt", ".data/penn-treebank/ptb.valid.txt", ".data/penn-treebank/ptb.test.txt"] vocab_size = 10000 + 1 # add 1 to account for PAD vocabulary = read_vocabulary(data_files, vocab_size) train_data, valid_data, test_data = read_lm_data(data_files, vocabulary) # Convert numpy to datasets and obtain iterators for each train_data = lm_data_producer(train_data) train_x = torch.tensor(train_data[0], dtype=torch.long, device=device) train_y = torch.tensor(train_data[1], dtype=torch.long, device=device) train_lengths = torch.tensor( train_data[2], dtype=torch.float, device=device) train_dataset = TensorDataset(train_x, train_y, train_lengths) valid_data = lm_data_producer(valid_data) valid_x = torch.tensor(valid_data[0], dtype=torch.long, device=device) valid_y = torch.tensor(valid_data[1], dtype=torch.long, device=device) valid_lengths = torch.tensor( valid_data[2], dtype=torch.float, device=device) valid_dataset = TensorDataset(valid_x, valid_y, valid_lengths) test_data = lm_data_producer(test_data) test_x = torch.tensor(test_data[0], dtype=torch.long, device=device) test_y = torch.tensor(test_data[1], dtype=torch.long, device=device) test_lengths = torch.tensor(test_data[2], dtype=torch.float, device=device) test_dataset = TensorDataset(test_x, test_y, test_lengths) train_iter = DataLoader(train_dataset, batch_size=batch_size) valid_iter = DataLoader(valid_dataset, batch_size=batch_size) test_iter = DataLoader(test_dataset, batch_size=batch_size) return train_iter, valid_iter, test_iter, vocabulary # downloading/preprocessing functions
[ 11748, 28686, 198, 11748, 28034, 198, 6738, 28034, 13, 26791, 13, 7890, 1330, 6060, 17401, 11, 309, 22854, 27354, 292, 316, 198, 11748, 7007, 198, 11748, 33245, 198, 11748, 19974, 7753, 198, 198, 6738, 764, 7890, 62, 46862, 1330, 1100, ...
2.332437
1,116
from tkinter import * #Cria a nossa tela instancia = Tk() #D um ttulo a tela instancia.title('Calculadora para Estatstica') #D um tamanho a tela instancia.geometry("800x600") #D um cone ao aplicativo #instancia.wm_iconbitmap('icone.ico') #Inicia o programa instancia.mainloop()
[ 6738, 256, 74, 3849, 1330, 1635, 198, 198, 2, 34, 7496, 257, 299, 793, 64, 256, 10304, 198, 8625, 1192, 544, 796, 309, 74, 3419, 198, 198, 2, 35, 23781, 256, 83, 43348, 257, 256, 10304, 198, 8625, 1192, 544, 13, 7839, 10786, 9771,...
2.319672
122
# -*- coding: utf-8 -*- """ Created on Sat Mar 19 09:42:09 2022 @author: iaala """ import requests import sql_configs import datetime import os from bs4 import BeautifulSoup import time from find_tables import ( table_information_one, table_information_two, table_information_three, table_information_four, ) from create_connection import create_sql_connection import columns __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if __name__ == "__main__": main()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 7031, 1526, 678, 7769, 25, 3682, 25, 2931, 33160, 198, 198, 31, 9800, 25, 220, 544, 6081, 198, 37811, 198, 198, 11748, 7007, 198, 11748, 44161, ...
2.778947
190
from django.urls import resolve, reverse from django.test import TestCase from matches.views import matches_index from matches.models import Match
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 10568, 11, 9575, 198, 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 6738, 7466, 13, 33571, 1330, 7466, 62, 9630, 198, 6738, 7466, 13, 27530, 1330, 13225, 628 ]
4.111111
36
from typing import Callable, Tuple import numpy as np def posterior_factory(y: np.ndarray, sigma_y: float, sigma_theta: float) -> Tuple[Callable]: """The banana distribution is a distribution that exhibits a characteristic banana-shaped ridge that resembles the posterior that can emerge from models that are not identifiable. The distribution is the posterior of the following generative model. y ~ Normal(theta[0] + theta[1]**2, sigma_sq_y) theta[i] ~ Normal(0, sigma_sq_theta) Args: y: Observations of the banana model. sigma_y: Standard deviation of the observations. sigma_theta: Standard deviation of prior over linear coefficients. Returns: log_posterior: Function to compute the log-posterior. metric: Function to compute the Fisher information metric. euclidean_auxiliaries: Function to compute the log-posterior and its gradient. riemannian_auxiliaries: Function to compute the log-posterior, the gradient of the log-posterior, the Fisher information metric, and the derivatives of the Fisher information metric. """ sigma_sq_y = np.square(sigma_y) sigma_sq_theta = np.square(sigma_theta) def log_posterior(theta: np.ndarray) -> float: """The banana-shaped distribution posterior. Args: theta: Linear coefficients. Returns: out: The log-posterior of the banana-shaped distribution. """ p = theta[0] + np.square(theta[1]) ll = -0.5 / sigma_sq_y * np.square(y - p).sum() lp = -0.5 / sigma_sq_theta * np.square(theta).sum() return ll + lp def grad_log_posterior(theta: np.ndarray) -> np.ndarray: """Gradient of the banana-shaped distribution with respect to the linear coefficients. Args: theta: Linear coefficients. Returns: out: The gradient of the log-posterior of the banana-shaped distribution with respect to the linear coefficients. """ p = theta[0] + np.square(theta[1]) d = np.sum(y - p) ga = d / sigma_sq_y - theta[0] / sigma_sq_theta gb = 2.0*d / sigma_sq_y * theta[1] - theta[1] / sigma_sq_theta return np.hstack((ga, gb)) def metric(theta: np.ndarray) -> np.ndarray: """The Fisher information is the negative expected outer product of the gradient of the posterior. Args: theta: Linear coefficients. Returns: G: The Fisher information metric of the banana-shaped distribution. """ n = y.size s = 2.0*n*theta[1] / sigma_sq_y G = np.array([[n / sigma_sq_y + 1.0 / sigma_sq_theta, s], [s, 4.0*n*np.square(theta[1]) / sigma_sq_y + 1.0 / sigma_sq_theta]]) return G def grad_metric(theta: np.ndarray) -> np.ndarray: """The gradient of the Fisher information metric with respect to the linear coefficients. Args: theta: Linear coefficients. Returns: dG: The gradient of the Fisher information metric with respect to the linear coefficients. """ n = y.size dG = np.array([ [[0.0, 0.0], [0.0, 2.0*n / sigma_sq_y]], [[0.0, 2.0*n / sigma_sq_y], [0.0, 8.0*n*theta[1] / sigma_sq_y]] ]) return dG def euclidean_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]: """Function to compute the log-posterior and the gradient of the log-posterior. Args: theta: Linear coefficients. Returns: lp: The log-posterior of the banana-shaped distribution. glp: The gradient of the log-posterior of the banana-shaped distribution with respect to the linear coefficients. """ lp = log_posterior(theta) glp = grad_log_posterior(theta) return lp, glp def riemannnian_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]: """Function to compute the log-posterior, the gradient of the log-posterior, the Fisher information metric and the derivatives of the Fisher information metric. Args: theta: Linear coefficients. Returns: lp: The log-posterior of the banana-shaped distribution. glp: The gradient of the log-posterior of the banana-shaped distribution with respect to the linear coefficients. G: The Fisher information metric of the banana-shaped distribution. dG: The gradient of the Fisher information metric with respect to the linear coefficients. """ lp = log_posterior(theta) glp = grad_log_posterior(theta) G = metric(theta) dG = grad_metric(theta) return lp, glp, G, dG return log_posterior, metric, log_posterior_and_metric, euclidean_auxiliaries, riemannnian_auxiliaries def generate_data(t: float, sigma_y: float, sigma_theta: float, num_obs: int) -> np.ndarray: """Generate data from the banana-shaped posterior distribution. Args: t: Free-parameter determining the thetas. sigma_y: Noise standard deviation. sigma_theta: Prior standard deviation over the thetas. num_obs: Number of observations to generate. Returns: theta: Linear coefficients of the banana-shaped distribution. y: Observations from the unidentifiable model. """ theta = np.array([t, np.sqrt(1.0 - t)]) y = theta[0] + np.square(theta[1]) + sigma_y * np.random.normal(size=(num_obs, )) return theta, y
[ 6738, 19720, 1330, 4889, 540, 11, 309, 29291, 198, 198, 11748, 299, 32152, 355, 45941, 628, 198, 4299, 34319, 62, 69, 9548, 7, 88, 25, 45941, 13, 358, 18747, 11, 264, 13495, 62, 88, 25, 12178, 11, 264, 13495, 62, 1169, 8326, 25, 1...
2.318458
2,465
from binaryninja import ( Architecture, BranchType, FlagRole, InstructionInfo, LowLevelILFlagCondition, RegisterInfo, ) from .instructions import TYPE3_INSTRUCTIONS, Instruction, Registers from .lifter import Lifter
[ 6738, 13934, 35073, 6592, 1330, 357, 198, 220, 220, 220, 29778, 11, 198, 220, 220, 220, 20551, 6030, 11, 198, 220, 220, 220, 19762, 47445, 11, 198, 220, 220, 220, 46486, 12360, 11, 198, 220, 220, 220, 7754, 4971, 4146, 34227, 48362, ...
3.025
80
__all__ = [ 'session', 'event', 'profile', 'consent', 'segment', 'source', 'rule', 'entity' ]
[ 834, 439, 834, 796, 685, 198, 220, 220, 220, 705, 29891, 3256, 198, 220, 220, 220, 705, 15596, 3256, 198, 220, 220, 220, 705, 13317, 3256, 198, 220, 220, 220, 705, 5936, 298, 3256, 198, 220, 220, 220, 705, 325, 5154, 3256, 198, 22...
1.909091
66
from salience_metrics import auc_judd, auc_shuff, cc, nss, similarity, normalize_map """ DHF1K paper: "we employ five classic met-rics, namely Normalized Scanpath Saliency (NSS), Sim-ilarity Metric (SIM), Linear Correlation Coefficient (CC),AUC-Judd (AUC-J), and shuffled AUC (s-AUC)."" """ import cv2 import os import numpy as np import time import pickle gt_directory = "/imatge/lpanagiotis/work/DHF1K_extracted/maps" sm_directory = "/imatge/lpanagiotis/work/DHF1K_extracted/predictions" final_metric_list = [] # The directories are named 1-1000 so it should be easy to iterate over them for i in range(1,701): start = time.clock() gt_path = os.path.join(gt_directory, str(i)) sm_path = os.path.join(sm_directory, str(i)) gt_files = os.listdir(gt_path) sm_files = os.listdir(sm_path) #Now to sort based on their file number. The "key" parameter in sorted is a function based on which the sorting will happen (I use split to exclude the jpg/png from the). gt_files_sorted = sorted(gt_files, key = lambda x: int(x.split(".")[0]) ) sm_files_sorted = sorted(sm_files, key = lambda x: int(x.split(".")[0]) ) pack = zip(gt_files_sorted, sm_files_sorted) print("Files related to video {} sorted.".format(i)) ## ##https://stackoverflow.com/questions/35663498/how-do-i-return-a-matrix-with-joblib-python from joblib import Parallel, delayed start = time.clock() metric_list = Parallel(n_jobs=8)(delayed(inner_worker)(n, packed, gt_path, sm_path) for n, packed in enumerate(pack)) #run 8 frames simultaneously aucj_mean = np.mean([x[0] for x in metric_list]) aucs_mean = np.mean([x[1] for x in metric_list]) nss_mean = np.mean([x[2] for x in metric_list]) cc_mean = np.mean([x[3] for x in metric_list]) sim_mean = np.mean([x[4] for x in metric_list]) print("For video number {} the metrics are:".format(i)) print("AUC-JUDD is {}".format(aucj_mean)) print("AUC-SHUFFLED is {}".format(aucs_mean)) print("NSS is {}".format(nss_mean)) print("CC is {}".format(cc_mean)) print("SIM is {}".format(sim_mean)) print("Time elapsed: {}".format(time.clock()-start)) print("==============================") final_metric_list.append(( aucj_mean, aucs_mean, nss_mean, cc_mean, sim_mean )) with open('metrics.txt', 'wb') as handle: pickle.dump(final_metric_list, handle, protocol=pickle.HIGHEST_PROTOCOL) Aucj = np.mean([y[0] for y in final_metric_list]) Aucs = np.mean([y[1] for y in final_metric_list]) Nss = np.mean([y[2] for y in final_metric_list]) Cc = np.mean([y[3] for y in final_metric_list]) Sim = np.mean([y[4] for y in final_metric_list]) print("Final average of metrics is:") print("AUC-JUDD is {}".format(Aucj)) print("AUC-SHUFFLED is {}".format(Aucs)) print("NSS is {}".format(Nss)) print("CC is {}".format(Cc)) print("SIM is {}".format(Sim))
[ 6738, 3664, 1240, 62, 4164, 10466, 1330, 257, 1229, 62, 73, 4185, 11, 257, 1229, 62, 1477, 1648, 11, 36624, 11, 299, 824, 11, 26789, 11, 3487, 1096, 62, 8899, 198, 37811, 198, 35, 29567, 16, 42, 3348, 25, 366, 732, 220, 1873, 220,...
2.408246
1,237
import pytest from thefuck.rules.pacman_invalid_option import get_new_command from thefuck.rules.pacman_invalid_option import match from thefuck.types import Command good_output = """community/shared_meataxe 1.0-3 A set of programs for working with matrix representations over finite fields """ bad_output = "error: invalid option '-"
[ 11748, 12972, 9288, 198, 198, 6738, 262, 31699, 13, 38785, 13, 33587, 805, 62, 259, 12102, 62, 18076, 1330, 651, 62, 3605, 62, 21812, 198, 6738, 262, 31699, 13, 38785, 13, 33587, 805, 62, 259, 12102, 62, 18076, 1330, 2872, 198, 6738, ...
3.46
100
# Copyright 2019 D-Wave Systems Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ============================================================================= """ A composite that scales problem variables as directed. if scalar is not given calculates it based on quadratic and bias ranges. """ try: import collections.abc as abc except ImportError: import collections as abc from numbers import Number import numpy as np from dimod.binary_quadratic_model import BinaryQuadraticModel from dimod.core.composite import ComposedSampler __all__ = 'ScaleComposite', def sample_ising(self, h, J, offset=0, scalar=None, bias_range=1, quadratic_range=None, ignored_variables=None, ignored_interactions=None, ignore_offset=False, **parameters): """ Scale and sample from the problem provided by h, J, offset if scalar is not given, problem is scaled based on bias and quadratic ranges. Args: h (dict): linear biases J (dict): quadratic or higher order biases offset (float, optional): constant energy offset scalar (number): Value by which to scale the energy range of the binary quadratic model. bias_range (number/pair): Value/range by which to normalize the all the biases, or if `quadratic_range` is provided, just the linear biases. quadratic_range (number/pair): Value/range by which to normalize the quadratic biases. ignored_variables (iterable, optional): Biases associated with these variables are not scaled. ignored_interactions (iterable[tuple], optional): As an iterable of 2-tuples. Biases associated with these interactions are not scaled. ignore_offset (bool, default=False): If True, the offset is not scaled. **parameters: Parameters for the sampling method, specified by the child sampler. Returns: :obj:`dimod.SampleSet` """ if any(len(inter) > 2 for inter in J): # handle HUBO import warnings msg = ("Support for higher order Ising models in ScaleComposite is " "deprecated and will be removed in dimod 0.9.0. Please use " "PolyScaleComposite.sample_hising instead.") warnings.warn(msg, DeprecationWarning) from dimod.reference.composites.higherordercomposites import PolyScaleComposite from dimod.higherorder.polynomial import BinaryPolynomial poly = BinaryPolynomial.from_hising(h, J, offset=offset) ignored_terms = set() if ignored_variables is not None: ignored_terms.update(frozenset(v) for v in ignored_variables) if ignored_interactions is not None: ignored_terms.update(frozenset(inter) for inter in ignored_interactions) if ignore_offset: ignored_terms.add(frozenset()) return PolyScaleComposite(self.child).sample_poly(poly, scalar=scalar, bias_range=bias_range, poly_range=quadratic_range, ignored_terms=ignored_terms, **parameters) bqm = BinaryQuadraticModel.from_ising(h, J, offset=offset) return self.sample(bqm, scalar=scalar, bias_range=bias_range, quadratic_range=quadratic_range, ignored_variables=ignored_variables, ignored_interactions=ignored_interactions, ignore_offset=ignore_offset, **parameters) def _scale_back_response(bqm, response, scalar, ignored_interactions, ignored_variables, ignore_offset): """Helper function to scale back the response of sample method""" if len(ignored_interactions) + len( ignored_variables) + ignore_offset == 0: response.record.energy = np.divide(response.record.energy, scalar) else: response.record.energy = bqm.energies((response.record.sample, response.variables)) return response def _check_params(ignored_variables, ignored_interactions): """Helper for sample methods""" if ignored_variables is None: ignored_variables = set() elif not isinstance(ignored_variables, abc.Container): ignored_variables = set(ignored_variables) if ignored_interactions is None: ignored_interactions = set() elif not isinstance(ignored_interactions, abc.Container): ignored_interactions = set(ignored_interactions) return ignored_variables, ignored_interactions def _calc_norm_coeff(h, J, bias_range, quadratic_range, ignored_variables, ignored_interactions): """Helper function to calculate normalization coefficient""" if ignored_variables is None or ignored_interactions is None: raise ValueError('ignored interactions or variables cannot be None') if quadratic_range is None: linear_range, quadratic_range = bias_range, bias_range else: linear_range = bias_range lin_range, quad_range = map(parse_range, (linear_range, quadratic_range)) lin_min, lin_max = min_and_max([v for k, v in h.items() if k not in ignored_variables]) quad_min, quad_max = min_and_max([v for k, v in J.items() if not check_isin(k, ignored_interactions)]) inv_scalar = max(lin_min / lin_range[0], lin_max / lin_range[1], quad_min / quad_range[0], quad_max / quad_range[1]) if inv_scalar != 0: return 1. / inv_scalar else: return 1. def _scaled_bqm(bqm, scalar, bias_range, quadratic_range, ignored_variables, ignored_interactions, ignore_offset): """Helper function of sample for scaling""" bqm_copy = bqm.copy() if scalar is None: scalar = _calc_norm_coeff(bqm_copy.linear, bqm_copy.quadratic, bias_range, quadratic_range, ignored_variables, ignored_interactions) bqm_copy.scale(scalar, ignored_variables=ignored_variables, ignored_interactions=ignored_interactions, ignore_offset=ignore_offset) bqm_copy.info.update({'scalar': scalar}) return bqm_copy def check_isin(key, key_list): return sum(set(key) == set(key_tmp) for key_tmp in key_list)
[ 2, 15069, 13130, 360, 12, 39709, 11998, 3457, 13, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 220, 220, 220, 345, 743, 407, 779, 428, 2393, 2845, 287,...
2.228444
3,375
import os
[ 11748, 28686, 628, 628 ]
3.25
4
from django import forms from django.core import validators from django.core.exceptions import ValidationError from leasing.enums import ( InfillDevelopmentCompensationState, LeaseState, TenantContactType, ) from leasing.models import Contact, DecisionMaker, District, LeaseType, Municipality from leasing.validators import validate_business_id
[ 6738, 42625, 14208, 1330, 5107, 198, 6738, 42625, 14208, 13, 7295, 1330, 4938, 2024, 198, 6738, 42625, 14208, 13, 7295, 13, 1069, 11755, 1330, 3254, 24765, 12331, 198, 198, 6738, 42150, 13, 268, 5700, 1330, 357, 198, 220, 220, 220, 4806...
3.63
100
# polls/management/commands/create_admin_user.py import sys import logging from django.core.management.base import BaseCommand, CommandError from django.contrib.auth.models import User from django.conf import settings
[ 2, 9231, 14, 27604, 14, 9503, 1746, 14, 17953, 62, 28482, 62, 7220, 13, 9078, 198, 11748, 25064, 198, 11748, 18931, 198, 198, 6738, 42625, 14208, 13, 7295, 13, 27604, 13, 8692, 1330, 7308, 21575, 11, 9455, 12331, 198, 6738, 42625, 142...
3.666667
60
import unirest import json import requests import os import subprocess import time import argparse rootUrl = "https://api.unsplash.com/" unirest.default_header("Accept", "application/json") unirest.default_header("Accept-Version", "v1") unirest.default_header("Authorization","<CLIENT-ID>") while True: parser = argparse.ArgumentParser() parser.add_argument('integers', metavar='int', type=int, help='time between wallpaper change (in seconds)') args = parser.parse_args() print "waiting for %s seconds" % args.integers time.sleep(args.integers) downloadPic(unirest.get(rootUrl + "photos/random", params={"orientation":"landscape"}))#.body["id"]
[ 11748, 555, 557, 301, 198, 11748, 33918, 198, 11748, 7007, 198, 11748, 28686, 198, 11748, 850, 14681, 198, 11748, 640, 198, 11748, 1822, 29572, 198, 198, 15763, 28165, 796, 366, 5450, 1378, 15042, 13, 13271, 489, 1077, 13, 785, 30487, 1...
2.977876
226
# Copyright (c) 2017, Somia Reality Oy # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import asyncio import logging from functools import partial from ninchat.client.asyncio import Session log = logging.getLogger(__name__)
[ 2, 15069, 357, 66, 8, 2177, 11, 9995, 544, 22520, 39447, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 2297, 396, 3890, 290, 779, 287, 2723, 290, 13934, 5107, 11, 351, 393, 1231, 198, 2, 17613, 11, 389, 10431, 2810, 326, 262, 1...
3.454756
431
from datetime import datetime
[ 6738, 4818, 8079, 1330, 4818, 8079, 628 ]
4.428571
7
from caching.base import CachingManager, CachingMixin from django.db import models from tsdata.models import CensusProfile PURPOSE_CHOICES = ( (1, "Speed Limit Violation"), (2, "Stop Light/Sign Violation"), (3, "Driving While Impaired"), (4, "Safe Movement Violation"), (5, "Vehicle Equipment Violation"), (6, "Vehicle Regulatory Violation"), (7, "Seat Belt Violation"), (8, "Investigation"), (9, "Other Motor Vehicle Violation"), (10, "Checkpoint"), ) ACTION_CHOICES = ( (1, "Verbal Warning"), (2, "Written Warning"), (3, "Citation Issued"), (4, "On-View Arrest"), (5, "No Action Taken"), ) PERSON_TYPE_CHOICES = (("D", "Driver"), ("P", "Passenger")) GENDER_CHOICES = (("M", "Male"), ("F", "Female")) ETHNICITY_CHOICES = (("H", "Hispanic"), ("N", "Non-Hispanic")) RACE_CHOICES = ( ("A", "Asian"), ("B", "Black"), ("I", "Native American"), ("U", "Other"), ("W", "White"), ) SEARCH_TYPE_CHOICES = ( (1, "Consent"), (2, "Search Warrant"), (3, "Probable Cause"), (4, "Search Incident to Arrest"), (5, "Protective Frisk"), ) SEARCH_BASIS_CHOICES = ( ("ER", "Erratic/Suspicious Behavior"), ("OB", "Observation of Suspected Contraband"), ("OI", "Other Official Information"), ("SM", "Suspicious Movement"), ("TIP", "Informant Tip"), ("WTNS", "Witness Observation"), )
[ 6738, 40918, 13, 8692, 1330, 327, 8103, 13511, 11, 327, 8103, 35608, 259, 198, 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 40379, 7890, 13, 27530, 1330, 20962, 37046, 198, 198, 47, 4261, 48933, 62, 44899, 34444, 796, 357, 198, ...
2.515206
559
from .login import LoginForm from .registration import RegistrationForm
[ 6738, 764, 38235, 1330, 23093, 8479, 201, 198, 6738, 764, 2301, 33397, 1330, 24610, 8479, 201, 198 ]
4.352941
17
from django.urls import path from django.contrib import admin from rest_framework_swagger.views import get_swagger_view from .views import notification schema_view = get_swagger_view(title='MAIL API') urlpatterns = [ path('front/betsy/irish/embargo/admin/', admin.site.urls), # Swagger API path( 'api/', schema_view, name='api' ), # notification path( 'notification/', notification.NotificationServicesRest.as_view(), name=notification.NotificationServicesRest.name ), ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 1334, 62, 30604, 62, 2032, 7928, 13, 33571, 1330, 651, 62, 2032, 7928, 62, 1177, 198, 198, 6738, 764, 33571, 1330, 14483, 19...
2.497738
221
from unittest import mock SOCK_STREAM = 0 set_interface = mock.Mock() interface = mock.MagicMock() getaddrinfo = mock.Mock() socket = mock.Mock()
[ 6738, 555, 715, 395, 1330, 15290, 198, 198, 50, 11290, 62, 2257, 32235, 796, 657, 198, 198, 2617, 62, 39994, 796, 15290, 13, 44, 735, 3419, 198, 39994, 796, 15290, 13, 22975, 44, 735, 3419, 198, 1136, 29851, 10951, 796, 15290, 13, 4...
2.759259
54
# -*- coding: utf-8 -*- import api,points from api.bottle import * II_PATH=os.path.dirname(__file__) or '.' TEMPLATE_PATH.insert(0,II_PATH) def _point_msg(pauth,tmsg): msgfrom, addr = points.check_hash(pauth) if not addr: return 'auth error!' cfg = api.load_echo(False) mo = api.toss(msgfrom,'%s,%s' % (cfg[0][1],addr),tmsg.strip()) if mo.msg.startswith('@repto:'): tmpmsg = mo.msg.splitlines() mo.repto = tmpmsg[0][7:] mo.msg = '\n'.join(tmpmsg[1:]) # - api.toss if len(mo.msg.encode('utf-8')) < 64100: h = api.point_newmsg(mo) if h: return 'msg ok:%s: <a href="/%s">%s</a>' % (h, mo.echoarea, mo.echoarea) else: return 'error:unknown' else: return 'msg big!' import iitpl iitpl.II_PATH=II_PATH run(host='127.0.0.1',port=62220,debug=False)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 40391, 11, 13033, 198, 6738, 40391, 13, 10985, 293, 1330, 1635, 198, 198, 3978, 62, 34219, 28, 418, 13, 6978, 13, 15908, 3672, 7, 834, 7753, 834, 8, 393, ...
1.928889
450
#!/usr/bin/env python from ncclient import manager import sys from lxml import etree # Set the device variables DEVICES = ['172.16.30.101', '172.16.30.102'] USER = 'admin' PASS = 'admin' PORT = 830 LOOPBACK_IP = { '172.16.30.101': '10.99.99.1/24', '172.16.30.102': '10.99.99.2/24' } DEVICE_NAMES = {'172.16.30.101': '(nx-osv9000-1)', '172.16.30.102': '(nx-osv9000-2)' } # create a main() method def main(): """ Main method that adds an IP address to interface loopback 99 to both the spine switches. """ loopback_ip_add = """ <config> <System xmlns="http://cisco.com/ns/yang/cisco-nx-os-device"> <ipv4-items> <inst-items> <dom-items> <Dom-list> <name>default</name> <if-items> <If-list> <id>lo99</id> <addr-items> <Addr-list> <addr>{}</addr> </Addr-list> </addr-items> </If-list> </if-items> </Dom-list> </dom-items> </inst-items> </ipv4-items> </System> </config>""" for device in DEVICES: with manager.connect(host=device, port=PORT, username=USER, password=PASS, hostkey_verify=False, device_params={'name': 'nexus'}, look_for_keys=False, allow_agent=False) as m: # Add the loopback interface print("\nNow adding IP address {} to device {} {}...\n".format(LOOPBACK_IP[device], DEVICE_NAMES[device], device)) new_ip = loopback_ip_add.format(LOOPBACK_IP[device]) netconf_response = m.edit_config(target='running', config=new_ip) # Parse the XML response print(netconf_response) if __name__ == '__main__': sys.exit(main())
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 6738, 299, 535, 75, 1153, 1330, 4706, 198, 11748, 25064, 198, 6738, 300, 19875, 1330, 2123, 631, 628, 198, 2, 5345, 262, 3335, 9633, 198, 198, 39345, 34444, 796, 37250, 23628, 13,...
1.749369
1,189
# To run this file, Win Start > cmd > file dir > run: python spacex_dash_app.py # Import required libraries import pandas as pd import dash from dash import html from dash import dcc from dash.dependencies import Input, Output import plotly.express as px # Read the airline data into pandas dataframe spacex_df = pd.read_csv("spacex_launch_dash.csv") max_payload = spacex_df['Payload Mass (kg)'].max() min_payload = spacex_df['Payload Mass (kg)'].min() # Dropdown list(s) launch_site_list = [] launch_site_list.append('ALL') for index, row in spacex_df['Launch Site'].value_counts().to_frame().iterrows(): launch_site_list.append(row.name) # Create a dash application app = dash.Dash(__name__) # Create an app layout app.layout = html.Div(children=[html.H1('SpaceX Launch Records Dashboard', style={'textAlign': 'center', 'color': '#503D36', 'font-size': 40}), # TASK 1: Add a dropdown list to enable Launch Site selection # The default select value is for ALL sites # dcc.Dropdown(id='site-dropdown',...) dcc.Dropdown(id='site-dropdown', options=[{'label': i, 'value': i} for i in launch_site_list], style={'width':'100%', 'padding':'3px', 'font-size': '20px', 'text-align-last': 'left'}, value='ALL'), html.Br(), # TASK 2: Add a pie chart to show the total successful launches count for all sites # If a specific launch site was selected, show the Success vs. Failed counts for the site html.Div(dcc.Graph(id='success-pie-chart')), html.Br(), html.P("Payload range (Kg):"), # TASK 3: Add a slider to select payload range #dcc.RangeSlider(id='payload-slider',...) dcc.RangeSlider(id='payload-slider', min=min_payload, max=max_payload, step=1000, value=[min_payload, max_payload]), # TASK 4: Add a scatter chart to show the correlation between payload and launch success html.Div(dcc.Graph(id='success-payload-scatter-chart')), ]) # TASK 2: # Add a callback function for `site-dropdown` as input, `success-pie-chart` as output # TASK 4: # Add a callback function for `site-dropdown` and `payload-slider` as inputs, `success-payload-scatter-chart` as output # Run the app if __name__ == '__main__': app.run_server(debug=True) # Finding Insights Visually # Now with the dashboard completed, you should be able to use it to analyze SpaceX launch data, and answer the following questions: # # Which site has the largest successful launches? ### KSC LC-39A # Which site has the highest launch success rate? ### KSC LC-39A # Which payload range(s) has the highest launch success rate? ### 2000 - 4000 # Which payload range(s) has the lowest launch success rate? ### 6000 - 9000 # Which F9 Booster version (v1.0, v1.1, FT, B4, B5, etc.) has the highest launch success rate? ### B5
[ 2, 1675, 1057, 428, 2393, 11, 7178, 7253, 1875, 23991, 1875, 2393, 26672, 1875, 1057, 25, 21015, 2272, 87, 62, 42460, 62, 1324, 13, 9078, 198, 2, 17267, 2672, 12782, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 14470, 198, 6738, ...
2.167623
1,569
""" The best randomly-searched ResNet reported in the paper. In the original paper there is a bug. This network sums together layers after the ReLU nonlinearity, which are not Gaussian, and also do not have mean 0. As a result, the overall network does not converge to a Gaussian process. The defined kernel is still valid, even if it doesn't correspond to a NN. In the interest of making the results replicable, we have replicated this bug as well. The correct way to use ResNets is to sum things after a Conv2d layer, see for example the `resnet_block` in `cnn_gp/kernels.py`. """ import torchvision from cnn_gp import Conv2d, ReLU, Sequential, Sum train_range = range(5000, 55000) validation_range = list(range(55000, 60000)) + list(range(0, 5000)) test_range = range(60000, 70000) dataset_name = "MNIST" model_name = "ResNet" dataset = torchvision.datasets.MNIST transforms = [] epochs = 0 in_channels = 1 out_channels = 10 var_bias = 4.69 var_weight = 7.27 initial_model = Sequential( *(Sum([ Sequential(), Sequential( Conv2d(kernel_size=4, padding="same", var_weight=var_weight * 4**2, var_bias=var_bias), ReLU(), )]) for _ in range(8)), Conv2d(kernel_size=4, padding="same", var_weight=var_weight * 4**2, var_bias=var_bias), ReLU(), Conv2d(kernel_size=28, padding=0, var_weight=var_weight, var_bias=var_bias), )
[ 37811, 198, 464, 1266, 15456, 12, 325, 283, 1740, 1874, 7934, 2098, 287, 262, 3348, 13, 198, 198, 818, 262, 2656, 3348, 612, 318, 257, 5434, 13, 770, 3127, 21784, 1978, 11685, 706, 198, 1169, 797, 41596, 1729, 29127, 414, 11, 543, 3...
2.581227
554
import matplotlib.pyplot as plt import numpy as np # daily search trend for keyword 'flowers' for a year d = [ 1.04, 1.04, 1.16, 1.22, 1.46, 2.34, 1.16, 1.12, 1.24, 1.30, 1.44, 1.22, 1.26, 1.34, 1.26, 1.40, 1.52, 2.56, 1.36, 1.30, 1.20, 1.12, 1.12, 1.12, 1.06, 1.06, 1.00, 1.02, 1.04, 1.02, 1.06, 1.02, 1.04, 0.98, 0.98, 0.98, 1.00, 1.02, 1.02, 1.00, 1.02, 0.96, 0.94, 0.94, 0.94, 0.96, 0.86, 0.92, 0.98, 1.08, 1.04, 0.74, 0.98, 1.02, 1.02, 1.12, 1.34, 2.02, 1.68, 1.12, 1.38, 1.14, 1.16, 1.22, 1.10, 1.14, 1.16, 1.28, 1.44, 2.58, 1.30, 1.20, 1.16, 1.06, 1.06, 1.08, 1.00, 1.00, 0.92, 1.00, 1.02, 1.00, 1.06, 1.10, 1.14, 1.08, 1.00, 1.04, 1.10, 1.06, 1.06, 1.06, 1.02, 1.04, 0.96, 0.96, 0.96, 0.92, 0.84, 0.88, 0.90, 1.00, 1.08, 0.80, 0.90, 0.98, 1.00, 1.10, 1.24, 1.66, 1.94, 1.02, 1.06, 1.08, 1.10, 1.30, 1.10, 1.12, 1.20, 1.16, 1.26, 1.42, 2.18, 1.26, 1.06, 1.00, 1.04, 1.00, 0.98, 0.94, 0.88, 0.98, 0.96, 0.92, 0.94, 0.96, 0.96, 0.94, 0.90, 0.92, 0.96, 0.96, 0.96, 0.98, 0.90, 0.90, 0.88, 0.88, 0.88, 0.90, 0.78, 0.84, 0.86, 0.92, 1.00, 0.68, 0.82, 0.90, 0.88, 0.98, 1.08, 1.36, 2.04, 0.98, 0.96, 1.02, 1.20, 0.98, 1.00, 1.08, 0.98, 1.02, 1.14, 1.28, 2.04, 1.16, 1.04, 0.96, 0.98, 0.92, 0.86, 0.88, 0.82, 0.92, 0.90, 0.86, 0.84, 0.86, 0.90, 0.84, 0.82, 0.82, 0.86, 0.86, 0.84, 0.84, 0.82, 0.80, 0.78, 0.78, 0.76, 0.74, 0.68, 0.74, 0.80, 0.80, 0.90, 0.60, 0.72, 0.80, 0.82, 0.86, 0.94, 1.24, 1.92, 0.92, 1.12, 0.90, 0.90, 0.94, 0.90, 0.90, 0.94, 0.98, 1.08, 1.24, 2.04, 1.04, 0.94, 0.86, 0.86, 0.86, 0.82, 0.84, 0.76, 0.80, 0.80, 0.80, 0.78, 0.80, 0.82, 0.76, 0.76, 0.76, 0.76, 0.78, 0.78, 0.76, 0.76, 0.72, 0.74, 0.70, 0.68, 0.72, 0.70, 0.64, 0.70, 0.72, 0.74, 0.64, 0.62, 0.74, 0.80, 0.82, 0.88, 1.02, 1.66, 0.94, 0.94, 0.96, 1.00, 1.16, 1.02, 1.04, 1.06, 1.02, 1.10, 1.22, 1.94, 1.18, 1.12, 1.06, 1.06, 1.04, 1.02, 0.94, 0.94, 0.98, 0.96, 0.96, 0.98, 1.00, 0.96, 0.92, 0.90, 0.86, 0.82, 0.90, 0.84, 0.84, 0.82, 0.80, 0.80, 0.76, 0.80, 0.82, 0.80, 0.72, 0.72, 0.76, 0.80, 0.76, 0.70, 0.74, 0.82, 0.84, 0.88, 0.98, 1.44, 0.96, 0.88, 0.92, 1.08, 0.90, 0.92, 0.96, 0.94, 1.04, 1.08, 1.14, 1.66, 1.08, 0.96, 0.90, 0.86, 0.84, 0.86, 0.82, 0.84, 0.82, 0.84, 0.84, 0.84, 0.84, 0.82, 0.86, 0.82, 0.82, 0.86, 0.90, 0.84, 0.82, 0.78, 0.80, 0.78, 0.74, 0.78, 0.76, 0.76, 0.70, 0.72, 0.76, 0.72, 0.70, 0.64] # Now let's generate random data for the same period d1 = np.random.random(365) assert len(d) == len(d1) fig = plt.figure() ax1 = fig.add_subplot(221) ax1.scatter(d, d1, alpha=0.5) ax1.set_title('No correlation') ax1.grid(True) ax2 = fig.add_subplot(222) ax2.scatter(d1, d1, alpha=0.5) ax2.set_title('Ideal positive correlation') ax2.grid(True) ax3 = fig.add_subplot(223) ax3.scatter(d1, d1*-1, alpha=0.5) ax3.set_title('Ideal negative correlation') ax3.grid(True) ax4 = fig.add_subplot(224) ax4.scatter(d1, d1+d, alpha=0.5) ax4.set_title('Non ideal positive correlation') ax4.grid(True) plt.tight_layout() plt.show()
[ 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 198, 2, 4445, 2989, 5182, 329, 21179, 705, 2704, 3618, 6, 329, 257, 614, 198, 67, 796, 685, 198, 352, 13, 3023, 11, 352, 13, 3023, 1...
1.638581
1,804
from dataclasses import dataclass import logging from attributes import get_ability_modifier from sourcetree.utils import ( get_feats_list, get_feat_perks, get_feat_proficiencies, get_feat_requirements, ) from stdio import prompt log = logging.getLogger("thespian.tweaks")
[ 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, 11748, 18931, 198, 198, 6738, 12608, 1330, 651, 62, 1799, 62, 4666, 7483, 198, 6738, 11348, 66, 316, 631, 13, 26791, 1330, 357, 198, 220, 220, 220, 651, 62, 5036, 1381, 62, 4868, ...
2.757009
107
import os import platform import numpy if __name__ == '__main__': trainFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\TrainSet' testFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\TestSet' # # trainFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\SampleImages - Copy' # testFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\SampleImages - Copy - Copy' # # # trainFolder = '/home/leonidas/Desktop/images/train' # # testFolder = '/home/leonidas/Desktop/images/test' # # [trainArray, train_labels, testArray, test_labels, validationArray, validation_labels, outputClasses] = \ # load_dataset(trainFolder, testFolder,imageSize=(3,3),convertion='L',imageChannels=1) # # print trainArray.shape # print trainArray # # print validation_labels # # print train_labels # # print trainArray # # print trainArray.shape # print train_labels.shape # print testArray.shape # print test_labels.shape # print validationArray.shape # print validation_labels.shape # # trainPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\train_2x2_CIELab_512.txt' # testPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\test_2x2_CIELab_512.txt' # trainLabelPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\train_2x2_CIELab_512_labels.txt' # testLabelPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\test_2x2_CIELab_512_labels.txt' # [trainArray, train_labels, testArray, test_labels, validationArray, validation_labels, # outputClasses] = loadFeatures(trainPath=trainPath, trainLabels=trainLabelPath, testPath=testPath, # testLabels=testLabelPath); i=0; for trainArray,train_labels in Matrix().getArrayOfImagesUsingMiniBatches(folderPath=trainFolder,image_size=(100,100),batch_size=15): print (trainArray.shape) print (train_labels.shape) i+=len(trainArray) print "aaasdasdas d : ",i # # print validation_labels # # print train_labels # # print trainArray # # print trainArray.shape # print train_labels.shape # print testArray.shape # print test_labels.shape # print validationArray.shape # print validation_labels.shape
[ 11748, 28686, 198, 11748, 3859, 198, 11748, 299, 32152, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 628, 220, 220, 220, 4512, 41092, 796, 705, 34, 7479, 14490, 59, 75, 13, 2100, 12421, 271, 59, 36881, 59, 34349...
2.509847
914
# -*- coding: utf-8 -*- # Copyright (c) 2018 Tigera, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from networking_calico.common import config as calico_config from networking_calico.compat import log from networking_calico import datamodel_v3 from networking_calico.plugins.ml2.drivers.calico.syncer import ResourceSyncer LOG = log.getLogger(__name__) # Each OpenStack security group is mapped to a Calico NetworkPolicy. A VM's # security group membership is represented by the VM having a label for each # security group that it belongs to; thus the selector # 'has(<security-group-label>)' represents the VMs that belong to that security # group. # # The label for each security group is 'sg.projectcalico.org/openstack-' # followed by the security group ID, and the name of the NetworkPolicy for each # security group is 'ossg.default.' followed by the security group ID. SG_LABEL_PREFIX = 'sg.projectcalico.org/openstack-' SG_NAME_LABEL_PREFIX = 'sg-name.projectcalico.org/openstack-' SG_NAME_MAX_LENGTH = (datamodel_v3.SANITIZE_LABEL_MAX_LENGTH - len(SG_NAME_LABEL_PREFIX)) SG_NAME_PREFIX = 'ossg.default.' def policy_spec(sgid, rules): """Generate JSON NetworkPolicySpec for the given security group.""" # <rules> can include those for several security groups. Pick out the # rules for the security group that we are translating right now. sg_rules = (r for r in rules if r['security_group_id'] == sgid) # Split the rules based on direction, and map to Calico form. inbound_rules = [] outbound_rules = [] for rule in sg_rules: if rule['direction'] == 'ingress': inbound_rules.append(_neutron_rule_to_etcd_rule(rule)) else: outbound_rules.append(_neutron_rule_to_etcd_rule(rule)) return { 'ingress': inbound_rules, 'egress': outbound_rules, 'selector': 'has(%s)' % (SG_LABEL_PREFIX + sgid), } def _neutron_rule_to_etcd_rule(rule): """_neutron_rule_to_etcd_rule Translate a single Neutron rule dict to a single dict in our etcd format. """ ethertype = rule['ethertype'] etcd_rule = {'action': 'Allow'} # Map the ethertype field from Neutron to etcd format. etcd_rule['ipVersion'] = {'IPv4': 4, 'IPv6': 6}[ethertype] # Map the protocol field from Neutron to etcd format. if rule['protocol'] is None or rule['protocol'] == -1: pass elif rule['protocol'] == 'ipv6-icmp': etcd_rule['protocol'] = 'ICMPv6' elif rule['protocol'] == 'icmp': etcd_rule['protocol'] = {'IPv4': 'ICMP', 'IPv6': 'ICMPv6'}[ethertype] elif isinstance(rule['protocol'], int): etcd_rule['protocol'] = rule['protocol'] else: etcd_rule['protocol'] = rule['protocol'].upper() port_spec = None if rule['protocol'] == 'icmp' or rule['protocol'] == 'ipv6-icmp': # OpenStack stashes the ICMP match criteria in # port_range_min/max. icmp_fields = {} icmp_type = rule['port_range_min'] if icmp_type is not None and icmp_type != -1: icmp_fields['type'] = icmp_type icmp_code = rule['port_range_max'] if icmp_code is not None and icmp_code != -1: icmp_fields['code'] = icmp_code if icmp_fields: etcd_rule['icmp'] = icmp_fields else: # src/dst_ports is a list in which each entry can be a # single number, or a string describing a port range. if rule['port_range_min'] == -1: port_spec = None elif rule['port_range_min'] == rule['port_range_max']: if rule['port_range_min'] is not None: port_spec = [rule['port_range_min']] else: port_spec = ['%s:%s' % (rule['port_range_min'], rule['port_range_max'])] entity_rule = {} if rule['remote_group_id'] is not None: entity_rule['selector'] = 'has(%s)' % (SG_LABEL_PREFIX + rule['remote_group_id']) if rule['remote_ip_prefix'] is not None: entity_rule['nets'] = [rule['remote_ip_prefix']] LOG.debug("=> Entity rule %s" % entity_rule) # Store in source or destination field of the overall rule. if entity_rule: if rule['direction'] == 'ingress': etcd_rule['source'] = entity_rule if port_spec is not None: etcd_rule['destination'] = {'ports': port_spec} else: if port_spec is not None: entity_rule['ports'] = port_spec etcd_rule['destination'] = entity_rule LOG.debug("=> %s Calico rule %s" % (rule['direction'], etcd_rule)) return etcd_rule
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 357, 66, 8, 2864, 17030, 64, 11, 3457, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, ...
2.399819
2,211
if __name__ == '__main__': with open('input.txt') as f: seq1 = f.readline().strip() seq2 = f.readline().strip() with open('BLOSUM62.txt') as f1: lines = [line.strip().split() for line in f1.readlines()] matrix = {(i[0], i[1]): int(i[2]) for i in lines} penalty = 5 alignment = '\n'.join(linear_space_global_alignment(seq1, seq2, matrix, penalty)) print(alignment)
[ 628, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 351, 1280, 10786, 15414, 13, 14116, 11537, 355, 277, 25, 198, 220, 220, 220, 220, 220, 220, 220, 33756, 16, 796, 277, 13, 961, 1370, ...
2.232804
189
import os, sys project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(project_path) import pickle if __name__ == "__main__": file_name = project_path + '/data/pathway_train_example.pkl' with open(file_name, 'rb') as f: data = pickle.load(f) trees_to_plot = [d['tree'] for d in data['generated_paths'][0:10]] create_tree_html(trees_to_plot, 'plotted_trees')
[ 11748, 28686, 11, 25064, 198, 198, 16302, 62, 6978, 796, 28686, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 397, 2777, 776, 7, 834, 7753, 834, 22305, 198, 17597, 13, 6978, 13, 33295, 7, 1630...
2.365169
178
''' Homework assignment for the 'Python is easy' course by Pirple. Written be Ed Yablonsky. Snowman(Hangman) game. ''' from os import ( name as os_name, system as system_call, ) from os.path import ( abspath, dirname, join as join_path, ) ''' Screen displays game output ''' ''' Input represents game input device ''' ''' Art is a game art which is set of frames that get loaded from a text file. Draws its current frame on a screen. ''' ''' Riddle holds secret word and gets solved by guesses ''' ''' Game is a game itself ''' Game().play()
[ 7061, 6, 198, 28718, 6433, 16237, 329, 262, 705, 37906, 318, 2562, 6, 1781, 416, 10334, 1154, 13, 198, 198, 25354, 307, 1717, 575, 23117, 684, 2584, 13, 198, 198, 28974, 805, 7, 39, 648, 805, 8, 983, 13, 198, 7061, 6, 198, 198, ...
2.817734
203
""" Created on Jul 5, 2012 @author: lichtens """ import csv import os
[ 37811, 198, 41972, 319, 5979, 642, 11, 2321, 198, 198, 31, 9800, 25, 300, 30830, 641, 198, 37811, 198, 11748, 269, 21370, 198, 11748, 28686, 198 ]
2.730769
26
""" ========================================================== Fitting model on imbalanced datasets and how to fight bias ========================================================== This example illustrates the problem induced by learning on datasets having imbalanced classes. Subsequently, we compare different approaches alleviating these negative effects. """ # Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com> # License: MIT # %% print(__doc__) # %% [markdown] # Problem definition # ------------------ # # We are dropping the following features: # # - "fnlwgt": this feature was created while studying the "adult" dataset. # Thus, we will not use this feature which is not acquired during the survey. # - "education-num": it is encoding the same information than "education". # Thus, we are removing one of these 2 features. # %% from sklearn.datasets import fetch_openml df, y = fetch_openml("adult", version=2, as_frame=True, return_X_y=True) df = df.drop(columns=["fnlwgt", "education-num"]) # %% [markdown] # The "adult" dataset as a class ratio of about 3:1 # %% classes_count = y.value_counts() classes_count # %% [markdown] # This dataset is only slightly imbalanced. To better highlight the effect of # learning from an imbalanced dataset, we will increase its ratio to 30:1 # %% from imblearn.datasets import make_imbalance ratio = 30 df_res, y_res = make_imbalance( df, y, sampling_strategy={classes_count.idxmin(): classes_count.max() // ratio}, ) y_res.value_counts() # %% [markdown] # We will perform a cross-validation evaluation to get an estimate of the test # score. # # As a baseline, we could use a classifier which will always predict the # majority class independently of the features provided. # %% from sklearn.model_selection import cross_validate from sklearn.dummy import DummyClassifier dummy_clf = DummyClassifier(strategy="most_frequent") scoring = ["accuracy", "balanced_accuracy"] cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring) print(f"Accuracy score of a dummy classifier: {cv_result['test_accuracy'].mean():.3f}") # %% [markdown] # Instead of using the accuracy, we can use the balanced accuracy which will # take into account the balancing issue. # %% print( f"Balanced accuracy score of a dummy classifier: " f"{cv_result['test_balanced_accuracy'].mean():.3f}" ) # %% [markdown] # Strategies to learn from an imbalanced dataset # ---------------------------------------------- # We will use a dictionary and a list to continuously store the results of # our experiments and show them as a pandas dataframe. # %% index = [] scores = {"Accuracy": [], "Balanced accuracy": []} # %% [markdown] # Dummy baseline # .............. # # Before to train a real machine learning model, we can store the results # obtained with our :class:`~sklearn.dummy.DummyClassifier`. # %% import pandas as pd index += ["Dummy classifier"] cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring) scores["Accuracy"].append(cv_result["test_accuracy"].mean()) scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean()) df_scores = pd.DataFrame(scores, index=index) df_scores # %% [markdown] # Linear classifier baseline # .......................... # # We will create a machine learning pipeline using a # :class:`~sklearn.linear_model.LogisticRegression` classifier. In this regard, # we will need to one-hot encode the categorical columns and standardized the # numerical columns before to inject the data into the # :class:`~sklearn.linear_model.LogisticRegression` classifier. # # First, we define our numerical and categorical pipelines. # %% from sklearn.impute import SimpleImputer from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import OneHotEncoder from sklearn.pipeline import make_pipeline num_pipe = make_pipeline( StandardScaler(), SimpleImputer(strategy="mean", add_indicator=True) ) cat_pipe = make_pipeline( SimpleImputer(strategy="constant", fill_value="missing"), OneHotEncoder(handle_unknown="ignore"), ) # %% [markdown] # Then, we can create a preprocessor which will dispatch the categorical # columns to the categorical pipeline and the numerical columns to the # numerical pipeline # %% from sklearn.compose import make_column_transformer from sklearn.compose import make_column_selector as selector preprocessor_linear = make_column_transformer( (num_pipe, selector(dtype_include="number")), (cat_pipe, selector(dtype_include="category")), n_jobs=2, ) # %% [markdown] # Finally, we connect our preprocessor with our # :class:`~sklearn.linear_model.LogisticRegression`. We can then evaluate our # model. # %% from sklearn.linear_model import LogisticRegression lr_clf = make_pipeline(preprocessor_linear, LogisticRegression(max_iter=1000)) # %% index += ["Logistic regression"] cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring) scores["Accuracy"].append(cv_result["test_accuracy"].mean()) scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean()) df_scores = pd.DataFrame(scores, index=index) df_scores # %% [markdown] # We can see that our linear model is learning slightly better than our dummy # baseline. However, it is impacted by the class imbalance. # # We can verify that something similar is happening with a tree-based model # such as :class:`~sklearn.ensemble.RandomForestClassifier`. With this type of # classifier, we will not need to scale the numerical data, and we will only # need to ordinal encode the categorical data. # %% from sklearn.preprocessing import OrdinalEncoder from sklearn.ensemble import RandomForestClassifier num_pipe = SimpleImputer(strategy="mean", add_indicator=True) cat_pipe = make_pipeline( SimpleImputer(strategy="constant", fill_value="missing"), OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1), ) preprocessor_tree = make_column_transformer( (num_pipe, selector(dtype_include="number")), (cat_pipe, selector(dtype_include="category")), n_jobs=2, ) rf_clf = make_pipeline( preprocessor_tree, RandomForestClassifier(random_state=42, n_jobs=2) ) # %% index += ["Random forest"] cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring) scores["Accuracy"].append(cv_result["test_accuracy"].mean()) scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean()) df_scores = pd.DataFrame(scores, index=index) df_scores # %% [markdown] # The :class:`~sklearn.ensemble.RandomForestClassifier` is as well affected by # the class imbalanced, slightly less than the linear model. Now, we will # present different approach to improve the performance of these 2 models. # # Use `class_weight` # .................. # # Most of the models in `scikit-learn` have a parameter `class_weight`. This # parameter will affect the computation of the loss in linear model or the # criterion in the tree-based model to penalize differently a false # classification from the minority and majority class. We can set # `class_weight="balanced"` such that the weight applied is inversely # proportional to the class frequency. We test this parametrization in both # linear model and tree-based model. # %% lr_clf.set_params(logisticregression__class_weight="balanced") index += ["Logistic regression with balanced class weights"] cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring) scores["Accuracy"].append(cv_result["test_accuracy"].mean()) scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean()) df_scores = pd.DataFrame(scores, index=index) df_scores # %% rf_clf.set_params(randomforestclassifier__class_weight="balanced") index += ["Random forest with balanced class weights"] cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring) scores["Accuracy"].append(cv_result["test_accuracy"].mean()) scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean()) df_scores = pd.DataFrame(scores, index=index) df_scores # %% [markdown] # We can see that using `class_weight` was really effective for the linear # model, alleviating the issue of learning from imbalanced classes. However, # the :class:`~sklearn.ensemble.RandomForestClassifier` is still biased toward # the majority class, mainly due to the criterion which is not suited enough to # fight the class imbalance. # # Resample the training set during learning # ......................................... # # Another way is to resample the training set by under-sampling or # over-sampling some of the samples. `imbalanced-learn` provides some samplers # to do such processing. # %% from imblearn.pipeline import make_pipeline as make_pipeline_with_sampler from imblearn.under_sampling import RandomUnderSampler lr_clf = make_pipeline_with_sampler( preprocessor_linear, RandomUnderSampler(random_state=42), LogisticRegression(max_iter=1000), ) # %% index += ["Under-sampling + Logistic regression"] cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring) scores["Accuracy"].append(cv_result["test_accuracy"].mean()) scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean()) df_scores = pd.DataFrame(scores, index=index) df_scores # %% rf_clf = make_pipeline_with_sampler( preprocessor_tree, RandomUnderSampler(random_state=42), RandomForestClassifier(random_state=42, n_jobs=2), ) # %% index += ["Under-sampling + Random forest"] cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring) scores["Accuracy"].append(cv_result["test_accuracy"].mean()) scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean()) df_scores = pd.DataFrame(scores, index=index) df_scores # %% [markdown] # Applying a random under-sampler before the training of the linear model or # random forest, allows to not focus on the majority class at the cost of # making more mistake for samples in the majority class (i.e. decreased # accuracy). # # We could apply any type of samplers and find which sampler is working best # on the current dataset. # # Instead, we will present another way by using classifiers which will apply # sampling internally. # # Use of specific balanced algorithms from imbalanced-learn # ......................................................... # # We already showed that random under-sampling can be effective on decision # tree. However, instead of under-sampling once the dataset, one could # under-sample the original dataset before to take a bootstrap sample. This is # the base of the :class:`imblearn.ensemble.BalancedRandomForestClassifier` and # :class:`~imblearn.ensemble.BalancedBaggingClassifier`. # %% from imblearn.ensemble import BalancedRandomForestClassifier rf_clf = make_pipeline( preprocessor_tree, BalancedRandomForestClassifier(random_state=42, n_jobs=2), ) # %% index += ["Balanced random forest"] cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring) scores["Accuracy"].append(cv_result["test_accuracy"].mean()) scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean()) df_scores = pd.DataFrame(scores, index=index) df_scores # %% [markdown] # The performance with the # :class:`~imblearn.ensemble.BalancedRandomForestClassifier` is better than # applying a single random under-sampling. We will use a gradient-boosting # classifier within a :class:`~imblearn.ensemble.BalancedBaggingClassifier`. from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from imblearn.ensemble import BalancedBaggingClassifier bag_clf = make_pipeline( preprocessor_tree, BalancedBaggingClassifier( base_estimator=HistGradientBoostingClassifier(random_state=42), n_estimators=10, random_state=42, n_jobs=2, ), ) index += ["Balanced bag of histogram gradient boosting"] cv_result = cross_validate(bag_clf, df_res, y_res, scoring=scoring) scores["Accuracy"].append(cv_result["test_accuracy"].mean()) scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean()) df_scores = pd.DataFrame(scores, index=index) df_scores # %% [markdown] # This last approach is the most effective. The different under-sampling allows # to bring some diversity for the different GBDT to learn and not focus on a # portion of the majority class.
[ 37811, 198, 10052, 4770, 2559, 855, 198, 37, 2535, 2746, 319, 545, 27753, 40522, 290, 703, 284, 1907, 10690, 198, 10052, 4770, 2559, 855, 198, 198, 1212, 1672, 21290, 262, 1917, 18268, 416, 4673, 319, 40522, 1719, 198, 320, 27753, 6097,...
3.223756
3,839
#!/usr/bin/python import argparse import re parser = argparse.ArgumentParser(description='Fix page breaks in War of The Rebellion text') parser.add_argument('files', nargs='*', help='Files to process') args = parser.parse_args() for file in args.files: outfile = open(file + ".joined-pagebreak", "w") text = ''.join(open(file).readlines()) pages = re.split("PAGEBREAK\n", text) # Remove empty pages pages = [x for x in pages if x] for i in xrange(0, len(pages) - 1): # Remove extraneous blank lines pages[i] = re.sub("\n\n\n+", "\n\n", pages[i]) # Undo HTML entities pages[i] = re.sub("&amp;", "&", pages[i]) pages[i] = re.sub("&lt;", "<", pages[i]) pages[i] = re.sub("&gt;", ">", pages[i]) # Do the following a second time to handle cases of # &amp;amp;, which are common pages[i] = re.sub("&amp;", "&", pages[i]) m = re.match(r"^( *\[*CHAP\. [A-Z]+\.\]* *\n\n?)(.*)", pages[i], re.S) if m: pages[i] = m.group(2) print "Removed CHAP heading on page %s:\n[%s]\n" % (i, m.group(1)) m = re.match("(.*?)(\n?(?: *[0-9]+|S) *(?:R R(?: *[-_VY]+ *[^\n]*)?|R *-+ *[^\n]*)\n)(.*)$", pages[i], re.S) if m: pages[i] = m.group(1) + m.group(3) print "Removed R R notation on page %s:\n[%s]\n" % (i, m.group(2)) m = re.match(r"(.*?\n)(\n* *------+\n( *(?:[*+#@$|^\\/&~=>!?]|[abc] |[abc][A-Z])[^\n]*\n|\n)* *-------+\n+(?:[*+#@$|^\\/&~=>!?] *[A-Z][^\n]*\n|\n)*)$", pages[i], re.S) if m: pages[i] = m.group(1) print "Removed footnote on page %s:\n[%s]\n" % (i, m.group(2)) m = re.match("(.*?\n)(\n*[*]?MAP[^\n]*\n+)$", pages[i], re.S) if m: pages[i] = m.group(1) print "Removed MAP notation on page %s:\n[%s]\n" % (i, m.group(2)) while pages[i] and pages[i][-1] == "\n": pages[i] = pages[i][0:-1] if "\n" not in pages[i]: lastlinelen = len(pages[i]) else: m = re.match(".*\n([^\n]*)$", pages[i], re.S) assert m lastlinelen = len(m.group(1)) shortline = lastlinelen < 60 join = False hyphenjoin = False if not pages[i]: continue if len(pages[i]) >= 2 and pages[i][-1] == '-' and pages[i][-2].islower(): if shortline: msg = "PAGEBREAK SHORT-LINE HYPHEN, NOT JOINED" else: msg = "PAGEBREAK HYPHEN-JOINED" hyphenjoin = True join = True elif pages[i + 1] and pages[i + 1][0].islower(): if shortline: msg = "PAGEBREAK SHORT-LINE NEXT PAGE STARTS LOWERCASE, NOT JOINED" else: msg = "PAGEBREAK NEXT PAGE STARTS LOWERCASE, JOINED" join = True elif len(pages[i]) >= 3 and pages[i][-1] == '.' and pages[i][-2].isupper() and pages[i][-3] in ['.', ' ']: if shortline: msg = "PAGEBREAK SHORT-LINE ENDS WITH ABBREVIATION PERIOD, NOT JOINED" else: msg = "PAGEBREAK ENDS ABBREV-PERIOD, JOINED" join = True elif pages[i][-1] == '.': msg = "PAGEBREAK ENDS PERIOD, NOT JOINED" elif len(pages[i]) >= 2 and pages[i][-1] == '*' and pages[i][-2] == '.': msg = "PAGEBREAK ENDS PERIOD STAR, NOT JOINED" elif len(pages[i]) >= 2 and pages[i][-1] == '"' and pages[i][-2] == '.': msg = "PAGEBREAK ENDS PERIOD QUOTE, NOT JOINED" elif pages[i][-1] == ':': msg = "PAGEBREAK ENDS COLON, NOT JOINED" elif pages[i][-1] == ',': if shortline: msg = "PAGEBREAK ENDS SHORT-LINE COMMA, NOT JOINED" else: msg = "PAGEBREAK ENDS COMMA, JOINED" join = True else: if shortline: msg = "PAGEBREAK ENDS SHORT-LINE OTHER, NOT JOINED" else: msg = "PAGEBREAK ENDS OTHER, JOINED" join = True print "Page %s: %s" % (i, msg) if hyphenjoin: outfile.write(pages[i][0:-1]) elif join: outfile.write(pages[i] + " ") else: outfile.write(pages[i]) outfile.write("\n\n") outfile.write("\n%s\n" % msg) outfile.close()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 11748, 1822, 29572, 198, 11748, 302, 198, 198, 48610, 796, 1822, 29572, 13, 28100, 1713, 46677, 7, 11213, 11639, 22743, 2443, 9457, 287, 1810, 286, 383, 34848, 2420, 11537, 198, 48610, 13,...
1.998482
1,976
if __name__ == "__main__": main("test") main("puzzle")
[ 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, 7203, 9288, 4943, 198, 220, 220, 220, 1388, 7203, 79, 9625, 4943, 198 ]
2.166667
30
import numpy as np import tensorflow as tf import tf_seal.python.ops.seal_ops as ops from tensorflow.python.keras.utils import tf_utils from tensorflow.python.client import session as tf_session from tensorflow.python.framework import ops as tf_ops # def __sub__(self, other): # other = convert_to_tensor(other) # res = ops.big_sub(self._raw, other._raw) # return Tensor(res) def _fetch_function(seal_tensor): unwrapped = [convert_from_tensor(seal_tensor, dtype=tf.float64)] rewrapper = lambda components_fetched: components_fetched[0].astype(np.float64) return unwrapped, rewrapper def _feed_function(seal_tensor, feed_value): return [(seal_tensor._raw, feed_value)] def _feed_function_for_partial_run(seal_tensor): return [seal_tensor._raw] # this allows tf_seal.Tensor to be passed directly to tf.Session.run, # unwrapping and converting the result as needed tf_session.register_session_run_conversion_functions( tensor_type=Tensor, fetch_function=_fetch_function, feed_function=_feed_function, feed_function_for_partial_run=_feed_function_for_partial_run, ) # TODO(Morten) # this allows implicit convertion of tf_seal.Tensor to tf.Tensor, # but since the output dtype is determined by the outer context # we essentially have to export with the implied risk of data loss tf_ops.register_tensor_conversion_function(Tensor, _tensor_conversion_function) # this allows Tensor to pass the tf.is_tensor test tf_ops.register_dense_tensor_like_type(Tensor) # this allows tf_big.Tensor to be plumbed through Keras layers # but seems only truly useful when used in conjunction with # `register_tensor_conversion_function` tf_utils.register_symbolic_tensor_type(Tensor)
[ 11748, 299, 32152, 355, 45941, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 198, 11748, 48700, 62, 325, 282, 13, 29412, 13, 2840, 13, 325, 282, 62, 2840, 355, 39628, 198, 198, 6738, 11192, 273, 11125, 13, 29412, 13, 6122, 292, 13, ...
2.908784
592
from django.core.exceptions import ObjectDoesNotExist from django.contrib import admin from django.utils.translation import ugettext_lazy as _ from django.shortcuts import render from django.conf import settings from .models import Language, Type, MusicFocus, Category, Topic, RTRCategory, Host, Note, RRule, Schedule, Show, TimeSlot from .forms import MusicFocusForm from datetime import date, datetime, time, timedelta admin.site.register(Language, LanguageAdmin) admin.site.register(Type, TypeAdmin) admin.site.register(MusicFocus, MusicFocusAdmin) admin.site.register(Category, CategoryAdmin) admin.site.register(Topic, TopicAdmin) admin.site.register(RTRCategory, RTRCategoryAdmin) admin.site.register(Host, HostAdmin) admin.site.register(Note, NoteAdmin) #admin.site.register(Schedule, ScheduleAdmin) admin.site.register(TimeSlot, TimeSlotAdmin) admin.site.register(Show, ShowAdmin)
[ 6738, 42625, 14208, 13, 7295, 13, 1069, 11755, 1330, 9515, 13921, 3673, 3109, 396, 198, 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 42625, 14208, 13, 26791, 13, 41519, 1330, 334, 1136, 5239, 62, 75, 12582, 355, 4808, 198,...
3.422642
265
import hashlib import json from time import time import pytest from app.chaine.blockchain import Blockchain def test_block_creation(a_valid_block, proof=123, previous_hash='abc'): bc = Blockchain() block_a_tester = bc.new_block(proof, previous_hash) assert block_a_tester['index'] == a_valid_block['index'] assert isinstance( block_a_tester['timestamp'], type(a_valid_block['timestamp']) ) assert block_a_tester['proof'] == a_valid_block['proof'] assert block_a_tester['previous_hash'] == a_valid_block['previous_hash']
[ 11748, 12234, 8019, 198, 11748, 33918, 198, 6738, 640, 1330, 640, 198, 11748, 12972, 9288, 198, 198, 6738, 598, 13, 354, 5718, 13, 9967, 7983, 1330, 29724, 628, 628, 628, 628, 198, 4299, 1332, 62, 9967, 62, 38793, 7, 64, 62, 12102, ...
2.639269
219
# from django.conf.urls import patterns, url, include # from django.views.generic import TemplateView # from . import views, APP_NAME # # urlpatterns = patterns('', # url(r'^$', views.index, name='%s.index' % APP_NAME), # ) from django.urls import path, re_path, include from . import views, APP_NAME from .api import LayerResource from tastypie.api import Api Resources_api = Api(api_name="api") Resources_api.register(LayerResource()) urlpatterns = [ re_path(r'^$', views.index, name='%s.index' % APP_NAME), path('styles/<str:layername>/', views.layer_styles, name='%s.layer_styles' % APP_NAME), path('styles/save/<str:layer_name>/<str:style_name>', views.save_style, name='%s.save_style' % APP_NAME), re_path(r'^proxy/geoserver/rest/(?P<suburl>.*)$', views.geoserver_rest_proxy, name='%s.proxy' % APP_NAME), re_path(r'^', include(Resources_api.urls)), ]
[ 2, 422, 42625, 14208, 13, 10414, 13, 6371, 82, 1330, 7572, 11, 19016, 11, 2291, 198, 2, 422, 42625, 14208, 13, 33571, 13, 41357, 1330, 37350, 7680, 198, 2, 422, 764, 1330, 5009, 11, 43504, 62, 20608, 198, 2, 198, 2, 19016, 33279, ...
2.605882
340
import os import glob import json from pathlib import Path from flask_restful import Api, Resource, reqparse from flask_jwt_extended import jwt_required from flask import Flask, request, escape, make_response, send_from_directory import utils # incase you can't install ansi2html it's won't break the api try: from ansi2html import Ansi2HTMLConverter except: pass current_path = os.path.dirname(os.path.realpath(__file__)) ''' render stdout content '''
[ 11748, 28686, 198, 11748, 15095, 198, 11748, 33918, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 42903, 62, 2118, 913, 1330, 5949, 72, 11, 20857, 11, 43089, 29572, 198, 6738, 42903, 62, 73, 46569, 62, 2302, 1631, 1330, 474, 46569, 62,...
3.086093
151
"""Module with hahomematic services.""" from __future__ import annotations from datetime import datetime import logging from hahomematic.const import ( ATTR_ADDRESS, ATTR_INTERFACE_ID, ATTR_NAME, ATTR_PARAMETER, ATTR_VALUE, HmPlatform, ) from hahomematic.device import HmDevice from hahomematic.entity import BaseEntity, GenericEntity import voluptuous as vol from homeassistant.const import ATTR_ENTITY_ID, ATTR_MODE, ATTR_TIME from homeassistant.core import HomeAssistant, ServiceCall from homeassistant.helpers import device_registry as dr import homeassistant.helpers.config_validation as cv from homeassistant.helpers.config_validation import comp_entity_ids from homeassistant.helpers.device_registry import DeviceEntry from homeassistant.helpers.service import ( async_register_admin_service, verify_domain_control, ) from .const import ( ATTR_PARAMSET, ATTR_PARAMSET_KEY, ATTR_RX_MODE, ATTR_VALUE_TYPE, DOMAIN, ) from .control_unit import ControlUnit, HaHub from .helpers import get_device_address_at_interface_from_identifiers _LOGGER = logging.getLogger(__name__) ATTR_CHANNEL = "channel" ATTR_DEVICE_ID = "device_id" DEFAULT_CHANNEL = 1 SERVICE_EXPORT_DEVICE_DEFINITION = "export_device_definition" SERVICE_PUT_PARAMSET = "put_paramset" SERVICE_SET_DEVICE_VALUE = "set_device_value" SERVICE_SET_INSTALL_MODE = "set_install_mode" SERVICE_SET_VARIABLE_VALUE = "set_variable_value" HAHM_SERVICES = [ SERVICE_EXPORT_DEVICE_DEFINITION, SERVICE_PUT_PARAMSET, SERVICE_SET_DEVICE_VALUE, SERVICE_SET_INSTALL_MODE, SERVICE_SET_VARIABLE_VALUE, ] SCHEMA_SERVICE_EXPORT_DEVICE_DEFINITION = vol.Schema( { vol.Required(ATTR_DEVICE_ID): cv.string, } ) SCHEMA_SERVICE_SET_VARIABLE_VALUE = vol.Schema( { vol.Required(ATTR_ENTITY_ID): comp_entity_ids, vol.Required(ATTR_NAME): cv.string, vol.Required(ATTR_VALUE): cv.match_all, } ) SCHEMA_SERVICE_SET_INSTALL_MODE = vol.Schema( { vol.Required(ATTR_INTERFACE_ID): cv.string, vol.Optional(ATTR_TIME, default=60): cv.positive_int, vol.Optional(ATTR_MODE, default=1): vol.All(vol.Coerce(int), vol.In([1, 2])), vol.Optional(ATTR_ADDRESS): vol.All(cv.string, vol.Upper), } ) SCHEMA_SERVICE_SET_DEVICE_VALUE = vol.Schema( { vol.Required(ATTR_DEVICE_ID): cv.string, vol.Required(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int), vol.Required(ATTR_PARAMETER): vol.All(cv.string, vol.Upper), vol.Required(ATTR_VALUE): cv.match_all, vol.Optional(ATTR_VALUE_TYPE): vol.In( ["boolean", "dateTime.iso8601", "double", "int", "string"] ), vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper), } ) SCHEMA_SERVICE_PUT_PARAMSET = vol.Schema( { vol.Required(ATTR_DEVICE_ID): cv.string, vol.Required(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int), vol.Required(ATTR_PARAMSET_KEY): vol.All(cv.string, vol.Upper), vol.Required(ATTR_PARAMSET): dict, vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper), } ) def _get_device(hass: HomeAssistant, device_id: str) -> HmDevice | None: """Return the homematic device.""" device_registry = dr.async_get(hass) device_entry: DeviceEntry | None = device_registry.async_get(device_id) if not device_entry: return None if ( data := get_device_address_at_interface_from_identifiers( identifiers=device_entry.identifiers ) ) is None: return None device_address = data[0] interface_id = data[1] if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id): return control_unit.central.hm_devices.get(device_address) return None def _get_interface_channel_address( hass: HomeAssistant, device_id: str, channel: int ) -> tuple[str, str] | None: """Return interface and channel_address with given device_id and channel.""" device_registry = dr.async_get(hass) device_entry: DeviceEntry | None = device_registry.async_get(device_id) if not device_entry: return None if ( data := get_device_address_at_interface_from_identifiers( identifiers=device_entry.identifiers ) ) is None: return None device_address = data[0] interface_id = data[1] channel_address = f"{device_address}:{channel}" return interface_id, channel_address def _get_entity(hass: HomeAssistant, entity_id: str) -> BaseEntity | None: """Return entity by given entity_id.""" control_unit: ControlUnit for control_unit in hass.data[DOMAIN].values(): if hm_entity := control_unit.async_get_hm_entity(entity_id=entity_id): if isinstance(hm_entity, BaseEntity): return hm_entity return None def _get_entities_by_platform( hass: HomeAssistant, platform: HmPlatform ) -> list[BaseEntity]: """Return entities by given platform.""" control_unit: ControlUnit hm_entities: list[BaseEntity] = [] for control_unit in hass.data[DOMAIN].values(): hm_entities.extend( control_unit.async_get_hm_entities_by_platform(platform=platform) ) return hm_entities def _get_hm_entity( hass: HomeAssistant, interface_id: str, channel_address: str, parameter: str ) -> GenericEntity | None: """Get homematic entity.""" if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id): return control_unit.central.get_hm_entity_by_parameter( channel_address=channel_address, parameter=parameter ) return None def _get_cu_by_interface_id( hass: HomeAssistant, interface_id: str ) -> ControlUnit | None: """Get ControlUnit by interface_id.""" for entry_id in hass.data[DOMAIN].keys(): control_unit: ControlUnit = hass.data[DOMAIN][entry_id] if control_unit and control_unit.central.clients.get(interface_id): return control_unit return None def _get_hub_by_entity_id(hass: HomeAssistant, entity_id: str) -> HaHub | None: """Get ControlUnit by device address.""" for entry_id in hass.data[DOMAIN].keys(): control_unit: ControlUnit = hass.data[DOMAIN][entry_id] if ( control_unit and control_unit.hub and control_unit.hub.entity_id == entity_id ): return control_unit.hub return None
[ 37811, 26796, 351, 387, 26452, 368, 1512, 2594, 526, 15931, 198, 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 11748, 18931, 198, 198, 6738, 387, 26452, 368, 1512, 13, 9979, 1330, 357, 198, 220...
2.404287
2,706
# Generated by Django 3.2.3 on 2021-06-03 00:35 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 513, 13, 17, 13, 18, 319, 33448, 12, 3312, 12, 3070, 3571, 25, 2327, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
# Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # The Universal Permissive License (UPL), Version 1.0 # # Subject to the condition set forth below, permission is hereby granted to any # person obtaining a copy of this software, associated documentation and/or # data (collectively the "Software"), free of charge and under any and all # copyright rights in the Software, and any and all patent rights owned or # freely licensable by each licensor hereunder covering either (i) the # unmodified Software as contributed to or provided by such licensor, or (ii) # the Larger Works (as defined below), to deal in both # # (a) the Software, and # # (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if # one is included with the Software each a "Larger Work" to which the Software # is contributed by such licensors), # # without restriction, including without limitation the rights to copy, create # derivative works of, display, perform, and distribute the Software and make, # use, sell, offer for sale, import, export, have made, and have sold the # Software and the Larger Work(s), and to sublicense the foregoing rights on # either these or other terms. # # This license is subject to the following condition: # # The above copyright notice and either this complete permission notice or at a # minimum a reference to the UPL must be included in all copies or substantial # portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import sys import re COPYRIGHT_HEADER = """\ /* * Copyright (c) 2017-2019, Oracle and/or its affiliates. * Copyright (c) 2014 by Bart Kiers * * The MIT License (MIT) * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ // Checkstyle: stop // JaCoCo Exclude //@formatter:off {0} """ PTRN_SUPPRESS_WARNINGS = re.compile(r"@SuppressWarnings.*") TRANSFORMS = [ replace_suppress_warnings, replace_rulectx, replace_localctx, ] if __name__ == '__main__': fpath = sys.argv[1] with open(fpath, 'r') as FILE: content = COPYRIGHT_HEADER.format(postprocess(FILE)) with open(fpath, 'w+') as FILE: FILE.write(content)
[ 2, 15069, 357, 66, 8, 2864, 11, 13130, 11, 18650, 290, 14, 273, 663, 29116, 13, 1439, 2489, 10395, 13, 198, 2, 8410, 5626, 8355, 5781, 6375, 22657, 46, 6089, 27975, 38162, 9947, 5626, 34444, 6375, 12680, 45811, 39837, 1137, 13, 198, ...
3.407306
1,095
from brownie import accounts, Wei, chain, ApeToken, ApeVaultFactory, ApeDistributor, ApeRegistry, ApeRouter, FeeRegistry, MockRegistry, MockVaultFactory, MockToken, MockVault
[ 6738, 7586, 494, 1330, 5504, 11, 29341, 11, 6333, 11, 317, 431, 30642, 11, 317, 431, 53, 1721, 22810, 11, 317, 431, 20344, 2455, 273, 11, 317, 431, 8081, 4592, 11, 317, 431, 49, 39605, 11, 28522, 8081, 4592, 11, 44123, 8081, 4592, ...
3.050847
59
from django.contrib.auth.mixins import PermissionRequiredMixin from django.urls import reverse_lazy from django.views.generic import DetailView, ListView, UpdateView from .models import Committee
[ 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 19816, 1040, 1330, 2448, 3411, 37374, 35608, 259, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 9575, 62, 75, 12582, 198, 6738, 42625, 14208, 13, 33571, 13, 41357, 1330, 42585, 7680, 11, ...
3.589286
56