query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Calculate the likelihoods for bernoulli
def calculate_likelihoods_bernoulli(data, labels, vocab): classes = set(labels) likelihoods = {} # Calculate likelihood for each class for cls in classes: documentsInClass = [set(map(lambda y: y[0], data[x])) for x in range(len(data)) if labels[x] == cls] numDocsInClass = len(documentsInClass) results = {} for word in vocab: numDocsWithWordInClass = len(filter(lambda x: word in x, documentsInClass)) # Binary variable-- either present or not present results[word] = laplace_smooth(numDocsWithWordInClass, numDocsInClass, 2) # Special laplace smoothing for words not found in training data results[None] = laplace_smooth(0, numDocsInClass, 2) likelihoods[cls] = results return likelihoods
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_prob_mle(X: np.ndarray, n: int) -> float:\n\n assert n > 1, \"for n = 1 use Bernoulli distribution.\"\n Binomial._check_input_data(X=X)\n Binomial._check_support(X=X, n=n)\n\n prob = X.mean() / n\n return prob", "def likelihood(self, data, hypo):\n tagged, n,...
[ "0.6838529", "0.6828888", "0.68097115", "0.674418", "0.67185456", "0.66190183", "0.66117036", "0.6602112", "0.65802485", "0.657394", "0.6549069", "0.65399593", "0.6539858", "0.65330833", "0.65202075", "0.64768666", "0.647659", "0.6444814", "0.6444441", "0.638896", "0.6378039"...
0.66105807
7
Extract the known vocabulary from our training data
def get_vocab(trainingData): return set(reduce(lambda x,y: x+y, map(lambda x: map(lambda y: y[0], x), trainingData), []))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vocab(self):\n\n\t\tself.parse_transcript() \n\t\tself.purge_words()\n\t\tself.analyze_words()\n\t\tself.sort_word_analysis()", "def get_vocabulary(documents):\n cv_model = CountVectorizer(binary=True)\n cv_model.fit(documents)\n\n vocabulary = cv_model.get_feature_names()\n vocabulary = list...
[ "0.7562361", "0.7207228", "0.7081693", "0.70540327", "0.7029069", "0.70169085", "0.7005524", "0.7005293", "0.7000013", "0.69506437", "0.69444656", "0.6888032", "0.6835956", "0.68238574", "0.6815228", "0.6788677", "0.6784426", "0.6746104", "0.6692373", "0.6628482", "0.660853",...
0.7412368
1
Parse and explode input data
def parse_data(filename): labels = [] documents = [] with open(filename, 'r') as f: for line in f: values = line.split() label = values[0] document = [] for wordCount in values[1:]: parsed = wordCount.split(':') word = parsed[0] count = int(parsed[1]) document.append((word, count)) labels.append(label) documents.append(document) return (labels, documents)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_data(self, lines: List[str]) -> List[str]:\n if self.is_tokenized:\n if self.parser == \"spacy\":\n lines = [l.split() for l in lines]\n elif self.parser == \"udpipe\":\n lines = [[l.split()] for l in lines]\n\n return lines", "def par...
[ "0.6394949", "0.6299175", "0.6273174", "0.62510943", "0.6217176", "0.62149054", "0.6148983", "0.61309695", "0.6124276", "0.6101856", "0.60483354", "0.5908739", "0.5896337", "0.588902", "0.587758", "0.58742684", "0.5872623", "0.5748684", "0.57401425", "0.57069176", "0.5701802"...
0.0
-1
Return elements from the iterable until it is exhausted. Then repeat the sequence indefinitely. cycle(seq) ==> seq[0], seq[1], ..., seq[n 1], seq[0], seq[1], ...
def cycle(seq, n=None): if n is not None: return Iter(_ncycle(n, seq)) return Iter(itertools.cycle(seq))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cycle(iterator: Iterable[Any]) -> Iterable[Any]:\n while True:\n yield from iterator", "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def repeat(iterable, coun...
[ "0.77041095", "0.73121995", "0.68373466", "0.68154997", "0.64987105", "0.64865166", "0.64855176", "0.61515063", "0.61507326", "0.6116417", "0.585476", "0.5825893", "0.5797128", "0.5789132", "0.57815135", "0.5774213", "0.577342", "0.5757885", "0.5697775", "0.56689405", "0.5658...
0.78215605
0
Returns the object for the specified number of times. If not specified, returns the object endlessly.
def repeat(obj, times=None): if times is None: return Iter(itertools.repeat(obj)) return Iter(itertools.repeat(obj, times))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repeat(self, count):\n return self.Sequence((self,) * count)", "def repeat(self, fn, *args, **kwargs):\n return repeat_n_times(self.n, fn, *args, **kwargs)", "def twist(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(0, 50)\n time.sleep(.75)\n r.stop()\n ...
[ "0.6203525", "0.59771645", "0.5969791", "0.5901691", "0.5872414", "0.5850168", "0.58327895", "0.58200485", "0.57241195", "0.5649693", "0.56443447", "0.5643324", "0.56162107", "0.5555079", "0.5550507", "0.5531917", "0.54840827", "0.54764825", "0.5443261", "0.54094815", "0.5397...
0.64713174
0
Make infinite calls to a function with the given arguments. End sequence if func() raises StopIteration.
def repeatedly(func, /, *args, **kwargs): func = to_callable(func) try: while True: yield func(*args, **kwargs) except StopIteration as e: yield from stop_seq(e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def iterate(f, x):\n while True:\n yield x\n x = f(x)", "def iterate(func: Callable[..., T], x: T, *args, index: Index = None):\n func = to_callable(func)\n index = to_index_seq(index)\n\n if index is None...
[ "0.7004105", "0.6646999", "0.63588154", "0.633149", "0.6150608", "0.6094851", "0.6073668", "0.59957045", "0.5961045", "0.5894083", "0.5876988", "0.58702266", "0.5862073", "0.58489794", "0.5841767", "0.5814898", "0.57949513", "0.5713033", "0.5708868", "0.57078665", "0.5697791"...
0.7911098
0
Return iterator with a single object.
def singleton(obj: T, expand: bool = False) -> Iter[T]: if expand: try: yield from obj except TypeError: pass yield obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getIter(object):\n iterator = None\n try:\n iterator = iter(object)\n except TypeError:\n pass\n return iterator", "def __next__(self):\n return next(self.iterator)", "def __iter__(self):\n self._first()\n return self", "def __iter__(self):\n return s...
[ "0.75817907", "0.74081564", "0.7373555", "0.71963745", "0.71724164", "0.7078414", "0.7078414", "0.7078414", "0.6872086", "0.6868703", "0.683738", "0.67541516", "0.665635", "0.66446465", "0.6599324", "0.65719706", "0.65708953", "0.65270007", "0.6459694", "0.64549285", "0.64549...
0.6621986
14
Invert a fold. Similar to iterate, but expects a function of seed > (seed', x). The second value of the tuple is included in the resulting sequence while the first is used to seed func in the next iteration. Stops iteration if func returns None or raise StopIteration.
def unfold(func, seed): try: elem = func(seed) while elem is not None: seed, x = elem yield x elem = func(seed) except StopIteration as e: yield from stop_seq(e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse(f):\n return lambda y: search(lambda x: f(x) == y)", "def fold(iterable, func, base):\n acc = base\n for element in iterable:\n acc = func(acc, element)\n return acc", "def foldl(func, start, itr):\n return _foldl(func, start, iter(itr))", "def flip(func):\n if not callab...
[ "0.551734", "0.54347765", "0.5407587", "0.53981245", "0.5355724", "0.5321342", "0.5257516", "0.52478385", "0.50181824", "0.5012077", "0.49952018", "0.49402606", "0.493333", "0.493333", "0.4932859", "0.492038", "0.49162006", "0.4866282", "0.4826197", "0.480424", "0.48014733", ...
0.76988935
0
f""" Repeatedly apply a function func to input. If more than one argument to func is passed, it iterate over the past n values. It requires at least one argument, if you need to iterate a zero
def iterate(func: Callable[..., T], x: T, *args, index: Index = None): func = to_callable(func) index = to_index_seq(index) if index is None and not args: out = _iterate(func, x) elif index is None: out = _iterate_n(func, (x, *args)) else: if not args: out = _iterate_indexed(func, index, x) else: out = _iterate_indexed_n(func, index, (x, *args)) return Iter(out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _call_n(x, f, n, *args, **kwargs):\n return [f(i, x, *args, **kwargs) for i in range(n)]", "def loop(func, n):\n for i in range(n):\n func()", "def repeatfunc(func, n, *args):\n return starmap(func, repeat(args, n))", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "de...
[ "0.7455248", "0.7375146", "0.7370228", "0.71860796", "0.7169171", "0.69895", "0.6812954", "0.6778386", "0.675432", "0.6688753", "0.6684521", "0.665017", "0.6537547", "0.64917755", "0.63972217", "0.63972217", "0.6348087", "0.62363607", "0.62081873", "0.6142448", "0.6037471", ...
0.6898225
6
Create iterator from sequence of numbers.
def from_sequence(self, seq): return Iter(self._from_sequence(seq))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def numeric_sequence_iteration(self) -> global___Statement.Iteration.NumericSequenceIteration:", "def simple_seq(seq):\n for i in seq:\n yield i", "def numbers():\n for number in range(1, 76):\n yield number", "def __iter__(self):\r\n \r\n return iter(self._by_number)", "d...
[ "0.6523413", "0.6352263", "0.6295878", "0.6285677", "0.6251581", "0.6161719", "0.6161719", "0.6123521", "0.59559906", "0.59429246", "0.5929433", "0.59055716", "0.5842463", "0.58171993", "0.57896346", "0.57666224", "0.57651085", "0.5762295", "0.5750994", "0.5719213", "0.571020...
0.70666045
0
Create iterator from slice object.
def from_slice(self, slice): start = 0 if slice.start is None else slice.start step = 1 if slice.step is None else slice.step return self.count(start, step, stop=slice.step)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slice(iterable, *args):\n return iter(it.islice(iterable, *args))", "def __getitem__(self, arg):\n if isinstance(arg, slice):\n # get value from slice\n start, stop, step = arg.start, arg.stop, arg.step\n # sanitize step\n if step is None:\n ...
[ "0.6592863", "0.6384958", "0.6363769", "0.62649775", "0.61393666", "0.60202515", "0.59986544", "0.5966865", "0.5952172", "0.5857153", "0.5757738", "0.574337", "0.5653791", "0.5642721", "0.56035215", "0.5587913", "0.5578734", "0.5572186", "0.5529783", "0.5510107", "0.5508437",...
0.6573793
1
Return values starting from start advancing by the given step.
def count(self, start=0, step=1, stop=None): out = itertools.count(start, step) if stop is not None: out = itertools.takewhile(lambda x: x < stop, out) return Iter(out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drange(start, stop, step):\n values=[]\n r = start\n while r <= stop:\n values.append(r)\n r += step\n return values", "def make_step(self):\n self.step_vals = np.cumsum(self.vals)", "def arange(self, start: float, stop: float, step: float = 1.0) -> None:\n self.valu...
[ "0.65679765", "0.64803994", "0.6422485", "0.64052194", "0.63530135", "0.62748873", "0.62696385", "0.6264165", "0.62514406", "0.6197221", "0.614684", "0.6128096", "0.6093126", "0.60743475", "0.60505", "0.6030288", "0.59912664", "0.597212", "0.5929738", "0.59283173", "0.5926146...
0.0
-1
Return a sequence of n evenly spaced numbers from a to b.
def evenly_spaced(self, a: Real, b: Real, n: int) -> Iter: return Iter(_evenly_spaced(a, b, n))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_by_n( seq, n ):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def split_by_n(seq, n):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def genslices(n):\n return product(range(-n, n+1), range(-n, n+1), range(-n, n+1))", "def genslices(n):\n retur...
[ "0.67545396", "0.67385876", "0.6609418", "0.6586301", "0.6527549", "0.6490447", "0.64674157", "0.6464234", "0.64498013", "0.6438853", "0.6431555", "0.64294153", "0.6413279", "0.63846004", "0.63330746", "0.63312566", "0.6319206", "0.6290663", "0.6289663", "0.62633723", "0.6244...
0.7720257
0
Convert int to string without using builtin str()
def int_to_string(num): if num < 0: num, is_neg = -num, True else: is_neg = False s = [] while num > 0: s.append(chr(ord('0') + num%10)) num //= 10 return ('-' if is_neg else '') + ''.join(reversed(s))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _int2str(num):\n if num<10:\n return '00%s'%str(num)\n elif 10<=num<100:\n return '0%s'%str(num)\n else:\n return '%s'%str(num)", "def _int_to_string(v):\n \n if not isinstance(v,int):\n raise InstrumentParameterException('Value %...
[ "0.8262509", "0.7803891", "0.7300844", "0.7271281", "0.72517717", "0.718009", "0.7099532", "0.70860934", "0.7008732", "0.69923896", "0.68970364", "0.68703055", "0.6847767", "0.67874974", "0.6779158", "0.676237", "0.67338043", "0.6702256", "0.6671091", "0.6608676", "0.65521425...
0.78195685
1
Convert string to int
def string_to_int(s): return functools.reduce(lambda running_sum, c: running_sum * 10 + string.digits.index(c), s[s[0] == '-':], 0) * (-1 if s[0] == '' else 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _str_to_int(in_str):\n if in_str == '':\n return 0\n return int(in_str, 10)", "def to_int(s: str) -> int:\n try:\n return int(s.replace('_', ''))\n except ValueError:\n return int(ast.literal_eval(s))", "def dec2int(r: str) -> int:", "def to_int(str_val: str) -> int:\n\n ...
[ "0.82145184", "0.8003485", "0.79868853", "0.79608244", "0.7875066", "0.7750694", "0.7643904", "0.76203454", "0.7554625", "0.75325245", "0.7502179", "0.7484789", "0.74722457", "0.7454978", "0.7444927", "0.7426303", "0.7408998", "0.74060065", "0.73542196", "0.72918594", "0.7284...
0.7517601
10
this method retrieves a data file from an internet location. make sure it is not password protected or anything.
def get_data_file(url, data=None, headers={}): req = urllib2.Request(url=url) for key in headers: req.add_header(key, headers[key]) site = urllib2.urlopen(req) data = site.read() headers = site.info() site.close() output = open(local_file, 'w') output.write(data) output.close() return data, headers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(location):\n # This is factored out so we can use a different retrieval method if required.\n # Originally used urllib2, but it had SSL issues on my machine\n response = requests.get(location)\n return response.content", "def web_get_file(self, url):\n try:\n print(url)...
[ "0.6372641", "0.625264", "0.62305814", "0.62305814", "0.6188294", "0.6158766", "0.6149518", "0.6095374", "0.60664946", "0.60570997", "0.6010867", "0.60009617", "0.59899217", "0.59876364", "0.59813595", "0.59589547", "0.5951379", "0.5951082", "0.59479934", "0.5938637", "0.5925...
0.60267264
10
This method takes in a results file created by ibex, strips out the comments, concatenates lines together that should go together, and outputs complete lines to the specified text file
def parse_results_file(filename): file = open(filename, 'r') pretext=[line for line in file.readlines() if line.strip()] file.close() text = [] processed = [] languages = 'NONE' ID = 'NONE' moreheader = raw_input('Extra header labels from question field (e.g.: item,condition,factor1,factor2): ') stim_type = raw_input('What type are your stims? (i.e. AcceptabilityJudgment): ') output_loc = raw_input('Where would you like to put your parsed file? (enter filename path): ') #takes out comments for line in pretext: if re.match('#', line): continue else: text.append(line) first = 1; for line in range(len(text)): #get their info if re.search('Form', text[line]): if re.search('number', text[line]): ID = re.split('number,', text[line])[1].strip() elif re.search('age', text[line]): languages = re.split('age,', text[line])[1].strip() #looks for the main stimulus type, as entered earlier if re.search(stim_type, text[line]): if first: #print 'first' processed.append(str(ID+ ','+languages+','+text[line])) first=0 else: toAmend = processed.pop() #print str('toAmend: ' + toAmend) toAdd='' splits = re.split('NULL,', text[line]) for thing in splits[1:]: if thing is not '': toAdd = str(toAdd + ',' + thing.strip(',')) #print str('toAdd: ' + toAdd) processed.append(str(toAmend.strip()+ toAdd)) first = 1 #if the line is a question line, there's more to append if re.search('Question', text[line]): toAmend = processed.pop() part = re.split('\$', text[line])[1] part.strip('$') parts = part.split('%2C') processed.append(str(toAmend.strip()+ ','+ string.join(parts, ',')+'\n')) output = open(output_loc, 'w') header = 'ID,Languages,Time sent,MD5 Hash of IP Address,Controller,Item Number,Element Number,Type,Group,Stimulus,Answer,RT,' output.write(str(header+moreheader+'\n')) #put it all into a text file for line in processed: output.write(line) output.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(inputfname, outfname):\n with open(inputfname, 'rt', encoding='utf8') as fh:\n # first block\n reviews = []\n while True:\n comment = next(fh).strip()\n if not comment:\n # blank line, block separator\n break\n url_movi...
[ "0.60390484", "0.599767", "0.5916299", "0.5812747", "0.56795824", "0.5623025", "0.54872257", "0.5453515", "0.5433996", "0.53970265", "0.53352237", "0.5327005", "0.5286181", "0.5284214", "0.5239452", "0.522815", "0.5226091", "0.5211899", "0.52046853", "0.51947665", "0.5192673"...
0.5896646
3
Helper function for counting leading ""s
def leading(self, string: str) -> int: leading_amount = 0 while string[leading_amount] == "-": leading_amount += 1 return leading_amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_blank(bd):\n count = 0\n for num in bd:\n if num == \" \":\n return count\n else:\n count += 1", "def linecount(x):\n return sum(1 for char in x if char == \"\\n\")", "def test_number_start_word():\n assert syllapy.count(\"4dog\") == 0", "def srow(...
[ "0.6599933", "0.62831575", "0.62317944", "0.6203771", "0.6051275", "0.6027196", "0.599927", "0.5920738", "0.5860832", "0.5799355", "0.57977337", "0.57650226", "0.57611704", "0.57336843", "0.5714815", "0.5706141", "0.56840324", "0.5660799", "0.5651189", "0.5635546", "0.562362"...
0.62299573
3
Structurize the list of lines into a dict by counting the leading ""s > as values. We also get rid of the "" and whitespaces afterwards!
def structure(data: list) -> dict: structure = {} for i in range(0, len(data)): leading_ = Interpreter.leading(Interpreter(), data[i]) data_ele = data[i].replace("-", "").strip() if data_ele in structure: structure[data_ele + "%%%"] = leading_ structure[data_ele] = leading_ return structure
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dictFromLines(lines,sep=None):\n reComment = re.compile('#.*')\n temp = [reComment.sub('',x).strip() for x in lines.split('\\n')]\n if sep == None or type(sep) == type(''):\n temp = dict([x.split(sep,1) for x in temp if x])\n else: #--Assume re object.\n temp = dict([sep.split(x,1) fo...
[ "0.6359959", "0.6270492", "0.6224244", "0.6006112", "0.599421", "0.5908032", "0.5810006", "0.579494", "0.57904506", "0.57837945", "0.5747093", "0.57200885", "0.56876975", "0.56852555", "0.5679893", "0.56574166", "0.56393373", "0.5639167", "0.55973274", "0.55856735", "0.556869...
0.0
-1
Entry point for the interpreter process.
def interpret(data: list) -> dict: return Interpreter.structure(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run():\n main()", "def main():\n run_program()", "def main():\n pass", "def main():\n return", "def main() -> None:\n return", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main():\n args = parse_args()\n process_args...
[ "0.7234675", "0.7215261", "0.7081077", "0.7078648", "0.7007489", "0.6970343", "0.6970343", "0.6970343", "0.6970343", "0.6953635", "0.69507045", "0.6907722", "0.683263", "0.6791568", "0.67693144", "0.67693144", "0.67693144", "0.67693144", "0.67693144", "0.67693144", "0.6769314...
0.0
-1
Checks a files permissions against a permission requirement
def _does_perms_meet_req(stats, disallowed_perms): # There's undoubtedly some simple clever binary algebra way to do this vals_with = dict() vals_with['r'] = [4, 5, 6, 7] vals_with['w'] = [2, 3, 6, 7] vals_with['x'] = [1, 3, 5, 7] # Scopes are User, Group, and World scope = ['U', 'G', 'W'] sections = disallowed_perms.split(',') # Sections are the three sections in the disallowed string we are passed, # which represent user, group, and world. # If we didn't get 3 sections, it's malformed - pass the test with a note if len(sections) is not 3: return_result = TestResult(Result.SKIP, notes="Malformed permission req") else: did_pass = True reason = "" # Get numeric value for file permissions - eg 644 file_perms_num = oct(stats.st_mode & 0o777)[-3:] cur_pos = 0 for section in sections: cur_perm = file_perms_num[cur_pos] # If we're checking for read access and the numeric permission # indicates that read access is granted, it's failed... add why to # the notes if 'r' in section and int(cur_perm) in vals_with['r']: did_pass = False reason += scope[cur_pos] + ':r ' # Same for write access... if 'w' in section and int(cur_perm) in vals_with['w']: did_pass = False reason += scope[cur_pos] + ':w ' # and execute access if 'x' in section and int(cur_perm) in vals_with['x']: did_pass = False reason += scope[cur_pos] + ':x ' # Next time through the loop look at the next section cur_pos += 1 if did_pass: return_result = TestResult(Result.PASS) else: return_result = TestResult(Result.FAIL, notes=reason) return return_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_permissions(self):\n self.assertEqual(dir_perm, 0o2750)\n self.assertEqual(file_perm, 0o0440)", "def check_files_permissions(self):\n result = []\n interesting_files = [\n # directories\n '/etc/init.d'\n '/etc/cron.d',\n '/etc/cron....
[ "0.7191872", "0.68649065", "0.67243284", "0.6699847", "0.6546892", "0.6490462", "0.6473374", "0.6473374", "0.6460848", "0.64598584", "0.6364402", "0.6364259", "0.636375", "0.6361546", "0.6326846", "0.6268052", "0.62131613", "0.6208553", "0.6181878", "0.6179629", "0.6141624", ...
0.66519415
4
Function to change pick uncertainties based upon correlation values (saved in pick Comment). This works inplace on the catalog.
def reweight_picks(cat): from obspy.core.event import QuantityError for ev in cat: for pk in ev.picks: if pk.phase_hint == 'P': ccval = float(pk.comments[0].text.split('=')[-1]) # Re-weight based on some scheme (less down-weighting) if ccval > 0.3: pk.time_errors = QuantityError(uncertainty=0.05) return cat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def switch_cut_cor(self):\n if self.cut_cor == 41:\n self.cut_cor = 42\n elif self.cut_cor == 42:\n self.cut_cor = 41", "def change_mix_corr(self, \n q_values, lmax,\n new_corr,new_cospsi,\n **kwargs):\n PhaseRetriever.__init__(self,\n q_valu...
[ "0.55101144", "0.54023015", "0.5400192", "0.5268006", "0.5240452", "0.5010482", "0.4968746", "0.48149148", "0.4811697", "0.47968152", "0.47857383", "0.47856605", "0.47505182", "0.47229475", "0.46873763", "0.46638468", "0.46599233", "0.46597108", "0.46573195", "0.4628536", "0....
0.49707213
6
Function to plot a random sample from a catalog with picks
def simple_pick_plot(cat, n_events, template_dict, st_dict, pyasdf=None, savefiles=False): from obspy import Catalog, UTCDateTime, Stream from obspy.core.event import ResourceIdentifier if n_events == 'all': rand_cat = cat else: rand_cat = rand_cat_sample(cat, n_events) # Make a list of year + julday integers to loop over min_date = min([ev.preferred_origin().time for ev in rand_cat]) max_date = max([ev.preferred_origin().time for ev in rand_cat]) for date in daterange(min_date, max_date): day_cat = rand_cat.filter("time >= " + str(UTCDateTime(date)), "time <= " + str(UTCDateTime(date) + 86400)) if len(day_cat) == 0: continue stachans = {pk.waveform_id.station_code: [] for ev in day_cat for pk in ev.picks} for ev in day_cat: for pick in ev.picks: if pick.waveform_id.channel_code not in stachans[pick.waveform_id.station_code]: stachans[pick.waveform_id.station_code].append(pick.waveform_id.channel_code) print(stachans) # Read the waveforms for this day if pyasdf: st = Stream() with pyasdf.ASDFDataSet(pyasdf) as ds: for sta in stachans: for station in ds.ifilter(ds.q.station == str(sta), ds.q.channel == stachans[sta], ds.q.starttime >= UTCDateTime(date), ds.q.endtime <= UTCDateTime(date) + 86400): st += station.raw_recording for ev in day_cat: det_st = st_dict[ev.resource_id] det_temp = template_dict[ResourceIdentifier('smi:local/' + str(ev.resource_id).split('/')[-1].split('_')[0] + '_1sec')] fig = plot_repicked(det_temp, ev.picks, det_st, size=(21, 15), save=savefiles, savefile=str(ev.resource_id).split('/')[-1] + '.png', title=str(ev.resource_id).split('/')[-1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_random_sample(pattern, num_to_select, row_no, col_no, c_map=\"viridis\"):\n mpl.rc(\"image\", cmap=c_map)\n all_images = get_image_paths(pattern)\n sampled_img = get_rand_img(num_to_select, all_images)\n plot_grid(row_no, col_no, sampled_img)", "def sample_and_plot(self):\n fig = plt....
[ "0.66966414", "0.65395725", "0.6370684", "0.6370684", "0.63241494", "0.63104224", "0.6130251", "0.5980402", "0.59737", "0.5940193", "0.5889786", "0.58694214", "0.5850805", "0.5846972", "0.5820033", "0.581082", "0.5789322", "0.57852453", "0.5775959", "0.5773435", "0.5740156", ...
0.6025946
7
Plot a template over a detected stream, with picks corrected by lagcalc.
def plot_repicked(template, picks, det_stream, size=(10.5, 7.5), save=False, savefile=None, title=False): # _check_save_args(save, savefile) fig, axes = plt.subplots(len(det_stream), 1, sharex=True, figsize=size) if len(template) > 1: axes = axes.ravel() mintime = det_stream.sort(['starttime'])[0].stats.starttime template.sort(['network', 'station', 'starttime']) lengths = [] lines = [] labels = [] n_templates_plotted = 0 for i, tr in enumerate(det_stream.sort(['starttime'])): # Cope with a single channel template case. if len(det_stream) > 1: axis = axes[i] else: axis = axes tr_picks = [pick for pick in picks if pick.waveform_id.station_code == tr.stats.station and pick.waveform_id.channel_code[0] + pick.waveform_id.channel_code[-1] == tr.stats.channel[0] + tr.stats.channel[-1]] if len(tr_picks) > 1: msg = 'Multiple picks on channel %s' % tr.stats.station + ', ' + \ tr.stats.channel raise NotImplementedError(msg) if len(tr_picks) == 0: msg = 'No pick for chanel %s' % tr.stats.station + ', ' + \ tr.stats.channel print(msg) else: pick = tr_picks[0] pick_delay = pick.time - mintime delay = tr.stats.starttime - mintime y = tr.data # Normalise if len(tr_picks) > 0 and template: y /= max(abs(y[int(pick_delay/tr.stats.delta):int(pick_delay/tr.stats.delta) + len(template[0])])) else: y /= max(abs(y)) x = np.linspace(0, (len(y) - 1) * tr.stats.delta, len(y)) x += delay axis.plot(x, y, 'k', linewidth=1.5) axis.set_ylim(-max(abs(y)), max(abs(y))) if template.select(station=tr.stats.station, channel=tr.stats.channel): btr = template.select(station=tr.stats.station, channel=tr.stats.channel)[0] bdelay = pick.time - mintime by = btr.data by /= max(abs(by)) bx = np.linspace(0, (len(by) - 1) * btr.stats.delta, len(by)) bx += bdelay if len(tr_picks) > 0: # Heads up for the x - 0.1 fudge factor here accounting for template pre-pick time template_line, = axis.plot(bx - 0.1, by, 'r', linewidth=1.6, label='Template') if not pick.phase_hint: pcolor = 'k' label = 'Unknown pick' elif 'P' 
in pick.phase_hint.upper(): pcolor = 'red' label = 'P-pick' elif 'S' in pick.phase_hint.upper(): pcolor = 'blue' label = 'S-pick' else: pcolor = 'k' label = 'Unknown pick' pdelay = pick.time - mintime ccval = pick.comments[0].text.split('=')[-1] line = axis.axvline(x=pdelay, color=pcolor, linewidth=2, linestyle='--', label=label) axis.text(pdelay, max(by), ccval, fontsize=12) if label not in labels: lines.append(line) labels.append(label) if n_templates_plotted == 0: lines.append(template_line) labels.append('Template') n_templates_plotted += 1 lengths.append(max(bx[-1], x[-1])) else: lengths.append(bx[1]) axis.set_ylabel('.'.join([tr.stats.station, tr.stats.channel]), rotation=0, horizontalalignment='right') axis.yaxis.set_ticks([]) if len(det_stream) > 1: axis = axes[len(det_stream) - 1] else: axis = axes axis.set_xlabel('Time (s) from %s' % mintime.datetime.strftime('%Y/%m/%d %H:%M:%S.%f')) plt.figlegend(lines, labels, 'upper right') if title: if len(template) > 1: axes[0].set_title(title) else: axes.set_title(title) else: plt.subplots_adjust(top=0.98) plt.tight_layout() plt.subplots_adjust(hspace=0) if not save: plt.show() plt.close() else: plt.savefig(savefile) plt.close() return fig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xx_plot(epoch, model, features, filters, figname, fgal=0.5):\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n N = ...
[ "0.59035116", "0.58859456", "0.578238", "0.5767331", "0.5742631", "0.57282263", "0.55894816", "0.5575051", "0.5565919", "0.55595213", "0.554277", "0.5542592", "0.55424595", "0.5524052", "0.5506664", "0.55034083", "0.5472254", "0.54397625", "0.54324085", "0.5425673", "0.541573...
0.6715108
0
Return a dictionary of information about a person
def build_person(first_name,last_name, age =''): person = { 'first': first_name.title(), 'last' : last_name.title()} if age: person['age'] = age return person
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_person(self):\n person_dict = {\n 'firstname': self.__firstname,\n 'lastname': self.__lastname,\n 'height': self.__height,\n 'weight': self.__weight,\n 'age': self.__age\n }\n return person_dict", "def who_am_i():\n return {'n...
[ "0.7562372", "0.7202004", "0.68984586", "0.6763077", "0.67415196", "0.6625842", "0.6569505", "0.65048116", "0.6392704", "0.63766086", "0.6367099", "0.6340469", "0.6332369", "0.6284719", "0.626071", "0.62580657", "0.62507343", "0.6244045", "0.62412703", "0.6240264", "0.623494"...
0.5966081
47
Carga toda la pila con enteros
def cargaAutoInt(pila):
    """Fill the stack with random integers in [0, 10] until it is full.

    Relies on the module-level helpers ``pila_llena`` and ``apilar``.
    """
    while not pila_llena(pila):
        apilar(pila, random.randint(0, 10))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comenzar_nuevo_juego():\n escena_uno.cargarEscena1(screen, display_width, display_height)#Se pone a correr la escena\n #escena_uno.cargarEscena2(screen, display_width, display_height)", "def Inicio():\n menu = \"\"\"\n Bienvenido al conversor de monedas 💰\n\n 1 - Pesos colombianos\n 2 - Pe...
[ "0.6960144", "0.62438864", "0.5807747", "0.5806468", "0.5786809", "0.5756569", "0.5700163", "0.5687251", "0.55761003", "0.5567695", "0.5562708", "0.55521", "0.5552041", "0.5549286", "0.5544261", "0.5521034", "0.5520254", "0.54952437", "0.54869676", "0.5480632", "0.54718786", ...
0.53275734
40
Carga toda la pila con strings
def cargaAutoStr(pila):
    """Fill the stack with random strings of length 1..15 until it is full.

    Uses the module-level ``randString`` helper to build each element.
    """
    while not pila_llena(pila):
        longitud = random.randint(1, 15)
        apilar(pila, randString(longitud))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comenzar_nuevo_juego():\n escena_uno.cargarEscena1(screen, display_width, display_height)#Se pone a correr la escena\n #escena_uno.cargarEscena2(screen, display_width, display_height)", "def stringToPila(palabra):\n pila = Pila()\n for elemento in palabra:\n apilar(pila, elemento)\n ret...
[ "0.58442247", "0.5648824", "0.56434804", "0.56288433", "0.5588605", "0.5584497", "0.5554773", "0.5449711", "0.54150164", "0.53789884", "0.5370985", "0.532739", "0.53188556", "0.5307319", "0.52647316", "0.51832354", "0.51682794", "0.51462793", "0.51443756", "0.51443756", "0.51...
0.59360135
0
Apila un elemento en la pila
def apilar(pila, dato):
    """Push *dato* onto the stack.

    Advances ``pila.tope`` by one and stores the value at the new top
    index of the preallocated ``pila.datos`` array.
    """
    nueva_cima = pila.tope + 1
    pila.tope = nueva_cima
    pila.datos[nueva_cima] = dato
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_podataka_sa_REST(self):\n urlUredjaj = self.cfg.get_konfig_element('REST', 'uredjaj')\n listaUredjaja = helper_funkcije.get_uredjaje_sa_REST(urlUredjaj)\n #inicijalizacija svih dostupnih uredjaja\n if listaUredjaja:\n for uredjaj in listaUredjaja:\n UN...
[ "0.60461986", "0.59805244", "0.59310794", "0.5646558", "0.5615027", "0.55744654", "0.5515526", "0.54436487", "0.5441662", "0.5441662", "0.54161835", "0.5410337", "0.5382265", "0.5358372", "0.53443366", "0.533257", "0.53211975", "0.53207725", "0.53205705", "0.53148395", "0.531...
0.5313862
21
Desapila el elemento en cima
def desapilar(pila):
    """Pop and return the element on top of the stack.

    Decrements ``pila.tope``; the slot in ``pila.datos`` is left as-is.
    """
    cima_actual = pila.tope
    pila.tope = cima_actual - 1
    return pila.datos[cima_actual]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, *args, **kwargs):\n campo = Campostagimg.objects.filter(tag=self.tag, imagen=self.imagen)\n for c in campo:\n c.medidas = \"\"\n c.save()\n c.precision = 0\n c.save()\n c.v_esperado = \"\"\n c.save() \n\n ...
[ "0.61714435", "0.61389273", "0.6033699", "0.5995851", "0.5916466", "0.5895718", "0.58551925", "0.5754909", "0.5726794", "0.5719078", "0.5699067", "0.56908727", "0.5681546", "0.56783044", "0.5675249", "0.5675179", "0.56316787", "0.55975395", "0.55830395", "0.55497795", "0.5538...
0.6247548
0
Devuelve elemento de la cima
def cima(pila):
    """Return the element on top of the stack without removing it."""
    indice_cima = pila.tope
    return pila.datos[indice_cima]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def first(self):\n if self.is_empty():\n raise Empty('La cola está vacía')\n return self._head._element # frente alineado con la cabeza de la lista", "def getFactura(self): \n return self.caja", "def getFactura(self): \n return self.caja", "def Cima(self):\n i...
[ "0.62369305", "0.5968242", "0.5968242", "0.5929463", "0.5875082", "0.5795964", "0.574635", "0.57377017", "0.56929976", "0.566961", "0.5612241", "0.5606185", "0.5604151", "0.55805415", "0.55805415", "0.5572482", "0.555679", "0.5539605", "0.5539363", "0.5522629", "0.55216", "...
0.6678378
0
Muestra todos los elementos en la pila
def barrido(pila):
    """Print every element of the stack, top to bottom.

    The contents are preserved: elements are moved onto an auxiliary
    stack while printing and then moved back, restoring the original
    order.
    """
    respaldo = Pila()
    while not pila_vacia(pila):
        elemento = desapilar(pila)
        print(elemento)
        apilar(respaldo, elemento)
    # restore the original stack from the auxiliary one
    while not pila_vacia(respaldo):
        apilar(pila, desapilar(respaldo))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list(self):", "def listar_gabarito():\n return GabaritoProva.listar(gabarito)", "def listadoServicio(listServicios,codigoReserva):\n try:\n if(codigoReserva!=\"\"):\n variables.listado = listar(codigoReserva)\n listServicios.clear()\n for registro in variables....
[ "0.6206592", "0.6027048", "0.60082996", "0.5969509", "0.5969509", "0.5953642", "0.5931115", "0.58567864", "0.5827238", "0.5822924", "0.5788263", "0.5785534", "0.578256", "0.57585746", "0.5745181", "0.57410586", "0.5723462", "0.57228816", "0.56660974", "0.5607361", "0.5586085"...
0.0
-1
Devuelve la pila invertida
def invertir(pila1):
    """Return a new stack with the elements of *pila1* in reverse order.

    Note: *pila1* is emptied in the process.
    """
    invertida = Pila()
    while not pila_vacia(pila1):
        apilar(invertida, desapilar(pila1))
    return invertida
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __invert__(self):\n return self.inverse()", "def inverse(self, x, y):", "def __invert(self, args):", "def invert(self,el):\n return el^(self.q-2)", "def invert(self):\n tmp = self.pvt\n self.pvt = self.nvt\n self.nvt = tmp\n tmp = self.pFace\n self.pFace...
[ "0.7260351", "0.7015506", "0.69790083", "0.6911814", "0.6806856", "0.6755985", "0.6703932", "0.67010486", "0.66792345", "0.6677001", "0.6664742", "0.66548175", "0.6640481", "0.65897536", "0.65876037", "0.6559641", "0.6545105", "0.65431285", "0.6482718", "0.64756066", "0.64310...
0.7615787
0
Devuelve una cadena aleatoria
def randString(largo=1):
    """Return a random string of ASCII letters of length *largo*."""
    valores = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    letras = [random.choice(valores) for _ in range(largo)]
    return ''.join(letras)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entrada_aluno(matricula):\n cod_curso = entrada_curso()\n print('> Em que ano, mês e dia você entrou na UFC? (YYYY-MM-DD)')\n data_de_ingresso = check.entrada('>>> ', check.data)\n print('> Em que data você vai concluir seu curso? (YYYY-MM-DD)')\n data_de_conclusao = check.entrada('>>> ', check....
[ "0.6110482", "0.58829135", "0.58471066", "0.5808168", "0.5769837", "0.5732787", "0.571716", "0.56439555", "0.5630807", "0.5551206", "0.55446774", "0.55200577", "0.5448135", "0.5437733", "0.5413697", "0.539102", "0.5377001", "0.5376293", "0.53636044", "0.53557664", "0.53530836...
0.0
-1
Devuelve pila del string ingresado
def stringToPila(palabra):
    """Build and return a stack from the characters of *palabra*.

    Characters are pushed in order, so the last character ends on top.
    """
    resultado = Pila()
    for caracter in palabra:
        apilar(resultado, caracter)
    return resultado
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_string2(self):\n pass", "def getApellidos(apellido):\n texto = f'El apellido es: {apellido}'\n return texto\n pass", "def psea(pname): # -> str:\n ...", "def print_as_text(pi):\n\n pi_string = str(\"%1.18f\" % pi)\n\n print(\"Definitive: \" + PI_STRING)\n\n print(\"Estimat...
[ "0.5971977", "0.59691834", "0.5927014", "0.59089327", "0.58850706", "0.5883489", "0.5877891", "0.5867647", "0.58412963", "0.5805934", "0.57704586", "0.5736824", "0.57056093", "0.5686816", "0.5681042", "0.5671489", "0.5662893", "0.56376225", "0.5629446", "0.56220335", "0.56079...
0.599477
0
renders html and takes a screenshot
def screenshot(url, path):
    """Render *url* in a headless PhantomJS browser and save a screenshot.

    :param url: page to load.
    :param path: file path the screenshot is written to.
    """
    browser = webdriver.PhantomJS()
    browser.set_window_size(1080, 800)
    browser.set_page_load_timeout(30)
    browser.get(url)
    browser.save_screenshot(path)
    browser.quit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_screenshot(html_file, year):\r\n\tdriver = webdriver.PhantomJS()\r\n\tdriver.set_window_size(800, 800)\r\n\tdriver.get(html_file)\r\n\r\n\t#allows the page to load completely\r\n\ttime.sleep(2)\r\n\r\n\timage = 'images/' + str(year) + '.png'\r\n\r\n\tdriver.save_screenshot(image)\r\n\tdriver.quit()\r\n\r\n...
[ "0.6781203", "0.67746735", "0.6765626", "0.6692511", "0.6436723", "0.62806684", "0.62287426", "0.62287426", "0.6214485", "0.61541307", "0.6063398", "0.6000311", "0.5993428", "0.59772694", "0.59754765", "0.5933216", "0.59321904", "0.58835065", "0.5881337", "0.5876187", "0.5874...
0.6651713
4
Transform incoming data to a homogeneous 2d array.
def transform(self, X):
    """Pad/truncate a list of sequences into a homogeneous 2-D array.

    Each row of the output holds one input sequence, truncated to
    ``self.max_len`` and right-padded with ``self.pad_value``.

    :param X: iterable of sequences of unequal length.
    :return: ndarray of shape (len(X), self.max_len) with dtype self.dtype.
    """
    n_rows = len(X)
    Xt = self.pad_value * np.ones((n_rows, self.max_len), dtype=self.dtype)
    for row, seq in enumerate(X):
        width = min(self.max_len, len(seq))
        if not width:
            # empty sequence: leave the padded row untouched
            continue
        Xt[row, :width] = np.array(seq[:width])
    return Xt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, data):\n data = np.atleast_2d(data)\n\n if self.orientation == 'row':\n return data\n else:\n return data.T", "def _to_numpy_ndarray(cls, data):\n if isinstance(data, np.ndarray):\n return data\n arr = np.array(data, dtype=np.f...
[ "0.69984454", "0.69612616", "0.69326127", "0.68554795", "0.68044513", "0.6771031", "0.6694132", "0.6677301", "0.65618455", "0.64136314", "0.64136314", "0.6397039", "0.6386304", "0.63829464", "0.63655895", "0.63461405", "0.6290972", "0.6282495", "0.6213077", "0.6213077", "0.61...
0.0
-1
Transform incoming data to a homogeneous 3d array.
def transform(self, X):
    """Pad/truncate nested sequences into a homogeneous 3-D array.

    The output has shape ``(len(X),) + self.max_size``; the first axis
    indexes samples, the second the (truncated) inner sequences, the
    third their (truncated) elements, all right-padded with
    ``self.pad_value``.
    """
    Xt = self.pad_value * np.ones((len(X),) + self.max_size, dtype=self.dtype)
    for i, nested in enumerate(X):
        depth = min(self.max_size[0], len(nested))
        if not depth:
            # empty sample: keep the fully padded slab
            continue
        # object dtype tolerates ragged inner sequences
        filas = np.array(nested[:depth], dtype=object)
        for j, vec in enumerate(filas):
            width = min(self.max_size[1], len(vec))
            Xt[i, j, :width] = vec[:width]
    return Xt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cvt2array(tuples):\n rc = []\n for t in tuples:\n rc.append(point3d(np.float32(t[X]), np.float32(t[Y]), np.float32(t[Z])))\n return rc", "def mat_2d_to_3d(x, agg_num, hop):\n # Pad to at least one block. \n len_x, n_in = x.shape\n if (len_x < agg_num):\n x = np...
[ "0.6545719", "0.64571476", "0.6421992", "0.6398177", "0.6392814", "0.632745", "0.62147915", "0.6207807", "0.6147925", "0.61356354", "0.6131394", "0.612939", "0.61182773", "0.61020154", "0.6070016", "0.60693353", "0.605311", "0.6039241", "0.6023937", "0.60228825", "0.60228825"...
0.0
-1
Finalize the grades and print. Only for assessors.
def finalize(request, pk, version=0):
    """Finalize the grades of one distribution (student) and show/print them.

    Only for assessors, the track head or a superuser. Behaviour depends
    on ``version``:

    * 0 -- render a summary page of the grades (no changes made);
    * 1 -- mark every category result Final and render a printable page;
    * 2 -- mark every category result Final and return the grades as a
      PDF attachment (rendered with xhtml2pdf/pisa).

    Raises PermissionDenied when the results menu is hidden, the student
    has no planned presentation, the user lacks rights, the time phase
    is not 7 (for versions 1/2), or ``version`` is unknown.
    """
    ts = get_timeslot()
    # the results menu must be visible for the current timeslot
    if not hasattr(ts, 'resultoptions'):
        raise PermissionDenied("Results menu is not yet visible.")
    else:
        if not get_timeslot().resultoptions.Visible:
            raise PermissionDenied("Results menu is not yet visible.")
    dstr = get_object_or_404(Distribution, pk=pk)
    if not hasattr(dstr, 'presentationtimeslot'):
        raise PermissionDenied('This student does not have a presentation planned. Please plan it first.')
    # only superusers, planned assessors or the track head may finalize
    if not request.user.is_superuser and \
            request.user not in dstr.presentationtimeslot.Presentations.Assessors.all() and \
            request.user != dstr.Proposal.Track.Head:
        raise PermissionDenied("You are not the correct owner of this distribution. "
                               " Grades can only be finalized by assessors or track heads. "
                               " To get a preview of the print view, use the 'Print Preview' button.")
    version = int(version)
    # check if grade is valid: every category of the timeslot must have a
    # completed CategoryResult for this distribution
    error_list = ''
    for cat in GradeCategory.objects.filter(TimeSlot=get_timeslot()):
        try:
            cat_res = cat.results.get(Distribution=dstr)
            if not cat_res.is_valid():
                error_list += ('<li>Category {} is not completed.</li>'.format(cat))
        except CategoryResult.DoesNotExist:
            error_list += ('<li>Category {} is missing</li>'.format(cat))
    if error_list:
        return render(request, "base.html", context={
            'Message': '<h1>The results of this student are not yet finished</h1><p>The following error(s) occurred:</p><ul>{}</ul>'.format(error_list),
            "return": "results:gradeformstaff",
            "returnget": str(pk),
        })
    if version == 0:
        # The normal page summarizing the grades of the student
        return render(request, "results/finalize_grades.html", {
            "dstr": dstr,
            "catresults": dstr.results.all(),
            "final": all(f.Final is True for f in dstr.results.all()),
            "finalgrade": dstr.TotalGradeRounded(),
            "preview": False,
        })
    else:
        # type 1 and 2, finalize grades.
        if get_timephase_number() != 7:
            raise PermissionDenied("Finalize grades is only possible in the time phase 'Presentation of results'")
        for cat in dstr.results.all():
            # set final to True, disable editing from here onward.
            cat.Final = True
            cat.save()
        if version == 1:
            # printable page with grades
            return render(request, "results/print_grades_pdf.html", {
                "dstr": dstr,
                "catresults": dstr.results.all(),
                "finalgrade": dstr.TotalGradeRounded(),
            })
        elif version == 2:
            # pdf with grades: render the same template to HTML, then
            # convert it with pisa (xhtml2pdf) into an attachment
            html = get_template('results/print_grades_pdf.html').render({
                "dstr": dstr,
                "catresults": dstr.results.all(),
                "finalgrade": dstr.TotalGradeRounded(),
            })
            buffer = BytesIO()
            pisa_status = pisa.CreatePDF(html.encode('utf-8'), dest=buffer, encoding='utf-8')
            if pisa_status.err:
                raise Exception("Pisa Failed PDF creation in print final grade for distribution {}.".format(dstr))
            buffer.seek(0)
            response = HttpResponse(buffer, 'application/pdf')
            response['Content-Disposition'] = 'attachment; filename="bepresult_{}.pdf"'.format(dstr.Student.usermeta.get_nice_name())
            return response
        # any other version value is rejected
        raise PermissionDenied('Invalid type.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n students = [\"Chris\", \"Jesse\", \"Sally\"]\n grades = [90, 80, 70]\n print_gradebook(students, grades)", "def finalize():\n\n print(\"\"\"\n The script analysis/sedov_compare.py can be used to analyze these\n results. That will perform an average at constant radiu...
[ "0.59876657", "0.57981944", "0.564417", "0.55673975", "0.55103207", "0.55028576", "0.5473704", "0.5460755", "0.5440873", "0.5409173", "0.53980476", "0.53825235", "0.5363283", "0.5363283", "0.5361439", "0.5361439", "0.5361439", "0.5361439", "0.5361439", "0.5361439", "0.5349366...
0.5862954
1
Edit grade for a category as indexed by step. For each student as given by pk. Also edit the individual aspects of each grade category. For trackheads and responsible staff
def finalize_preview(request, pk, step=0):
    """Print-preview of the final grades of one distribution (student).

    Renders the same summary template as ``finalize`` but flagged as a
    preview, so nothing is frozen. Accessible to superusers, the track
    head, the responsible staff, type-3 staff and the planned assessors.

    :param request: current HttpRequest.
    :param pk: pk of the Distribution whose grades are shown.
    :param step: not used in this body; presumably kept for URL-signature
        symmetry with the wizard views -- TODO confirm.
    """
    ts = get_timeslot()
    # the results menu must be visible for the current timeslot
    if not hasattr(ts, 'resultoptions'):
        raise PermissionDenied("Results menu is not yet visible.")
    else:
        if not get_timeslot().resultoptions.Visible:
            raise PermissionDenied("Results menu is not yet visible.")
    dstr = get_object_or_404(Distribution, pk=pk)
    if not hasattr(dstr, 'presentationtimeslot'):
        raise PermissionDenied('This student does not have a presentation planned. Please plan it first.')
    # wider audience than finalize(): responsible staff and type-3 staff
    # may also view the preview
    if not request.user.is_superuser and \
            request.user != dstr.Proposal.Track.Head and \
            request.user != dstr.Proposal.ResponsibleStaff and \
            get_grouptype('3') not in request.user.groups.all() and \
            request.user not in dstr.presentationtimeslot.Presentations.Assessors.all():
        raise PermissionDenied("You do not have the correct permissions to view print preview.")
    return render(request, "results/finalize_grades.html", {
        "dstr": dstr,
        "catresults": dstr.results.all(),
        # all([]) is True, so guard against a distribution without results
        "final": all(f.Final is True for f in dstr.results.all()) if dstr.results.all() else False,
        "finalgrade": dstr.TotalGradeRounded(),
        "preview": True,
    })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_course_enrollment(self, student_id, course_id, course_section_id, term):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n UPDATE course_enrollments\n ...
[ "0.56488323", "0.5607644", "0.5588034", "0.5586747", "0.555214", "0.5505774", "0.5498233", "0.5445095", "0.52898294", "0.52345645", "0.518912", "0.51739746", "0.5155556", "0.51250327", "0.50697607", "0.50619745", "0.5057049", "0.5050862", "0.5034237", "0.5033281", "0.4990466"...
0.0
-1
Edit grade for a category as indexed by step. For each student as given by pk. Also edit the individual aspects of each grade category. For trackheads and responsible staff
def staff_form(request, pk, step=0):
    """Grade-entry wizard: edit one grade category (and its aspects) per step.

    ``step`` 0 renders the wizard overview; steps 1..N render the form
    for the N-th GradeCategory of the current timeslot, creating or
    updating its CategoryResult and CategoryAspectResults on POST.
    For track heads, responsible staff, assistants, type-3 staff and
    assessors of the planned presentation.

    :param request: current HttpRequest.
    :param pk: pk of the Distribution being graded.
    :param step: 0 for the overview, 1-based category index otherwise.
    """
    ts = get_timeslot()
    # the results menu must be visible for the current timeslot
    if not hasattr(ts, 'resultoptions'):
        raise PermissionDenied("Results menu is not yet visible.")
    else:
        if not get_timeslot().resultoptions.Visible:
            raise PermissionDenied("Results menu is not yet visible.")
    dstr = get_object_or_404(Distribution, pk=pk)
    if not hasattr(dstr, 'presentationtimeslot'):
        raise PermissionDenied('This student does not have a presentation planned. Please plan it first.')
    # permission chain: superuser, track head, responsible staff,
    # (type-1 AND assistant), type-3 staff, or planned assessor
    if not request.user.is_superuser and \
            request.user != dstr.Proposal.Track.Head and \
            request.user != dstr.Proposal.ResponsibleStaff and \
            (get_grouptype('1') not in request.user.groups.all() or request.user not in dstr.Proposal.Assistants.all()) and \
            get_grouptype('3') not in request.user.groups.all() and \
            request.user not in dstr.presentationtimeslot.Presentations.Assessors.all():
        raise PermissionDenied("You are not the correct owner of this distribution. "
                               "Only track heads and responsible staff can edit grades.")
    cats = GradeCategory.objects.filter(TimeSlot=get_timeslot()).distinct()
    numcategories = len(cats)
    step = int(step)
    if step == 0:
        # wizard overview page
        return render(request, "results/wizard.html", {
            "step": 0,
            "pk": pk,
            "categories": cats,
            "dstr": dstr,
            "final": all(f.Final is True for f in dstr.results.all()) if dstr.results.all() else False,  # fix for all([])=True
            # "files": files,
        })
    elif step <= numcategories:
        saved = False
        cat = cats[step - 1]
        try:
            # existing category result
            cat_result = CategoryResult.objects.get(Distribution=dstr, Category=cat)
            # NOTE(review): `initial` is only referenced by the
            # commented-out file-form variant below
            initial = None
        except CategoryResult.DoesNotExist:
            # new result
            cat_result = CategoryResult(Distribution=dstr, Category=cat)
            # initial = {'Files': list(StudentFile.objects.filter(Type=cat_result.Category.File, Distribution=cat_result.Distribution).distinct())}
        if request.method == "POST":
            # submitted form
            if cat_result.Final:
                return render(request, "base.html", context={
                    "Message": "Category Result has already been finalized! Editing is not allowed anymore. "
                               "If this has to be changed, contact support staff"
                })
            # if files:
            #     category_form = CategoryResultFormFile(request.POST, instance=cat_result, prefix='catform')
            # else:
            category_form = CategoryResultForm(request.POST, instance=cat_result, prefix='catform')
            aspect_forms = []
            # one sub-form per aspect of this category
            for i, aspect in enumerate(cat.aspects.all()):
                try:
                    # try find existing form
                    aspect_result = CategoryAspectResult.objects.get(CategoryResult=cat_result, CategoryAspect=aspect)
                except CategoryAspectResult.DoesNotExist:
                    # new clean form
                    aspect_result = CategoryAspectResult(CategoryResult=cat_result, CategoryAspect=aspect)
                aspect_forms.append({
                    "form": AspectResultForm(request.POST, instance=aspect_result, prefix="aspect" + str(i)),
                    "aspect": aspect,
                })
            if category_form.is_valid() and all([form['form'].is_valid() for form in aspect_forms]):
                cat_result = category_form.save()
                # return the form with the cleaned grade, not the one with the (uncleaned) post data:
                # if files:
                #     category_form = CategoryResultFormFile(instance=cat_result, prefix='catform')
                # else:
                category_form = CategoryResultForm(instance=cat_result, prefix='catform')
                for form in aspect_forms:
                    # these forms do not need to be updated as aspect data is not cleaned.
                    aspect_result = form['form'].instance
                    aspect_result.CategoryResult = cat_result
                    aspect_result.save()
                saved = True
        else:
            # GET: build unbound (possibly disabled) forms
            # if files:
            #     category_form = CategoryResultFormFile(instance=cat_result, initial=initial, prefix='catform', disabled=cat_result.Final)
            # else:
            category_form = CategoryResultForm(instance=cat_result, prefix='catform', disabled=cat_result.Final)
            aspect_forms = []
            for i, aspect in enumerate(cat.aspects.all()):
                try:
                    aspect_result = CategoryAspectResult.objects.get(CategoryResult=cat_result, CategoryAspect=aspect)
                except CategoryAspectResult.DoesNotExist:
                    aspect_result = CategoryAspectResult(CategoryResult=cat_result, CategoryAspect=aspect)
                aspect_forms.append({
                    "form": AspectResultForm(instance=aspect_result, prefix="aspect" + str(i), disabled=cat_result.Final),
                    "aspect": aspect,
                })
        # render the wizard step (also reached after an invalid POST,
        # so the bound forms with errors are shown)
        return render(request, "results/wizard.html", {
            "step": step,
            "categories": cats,
            "category": cat,
            "categoryform": category_form,
            "aspectsforms": aspect_forms,
            "dstr": dstr,
            "pk": pk,
            "saved": saved,
            "final": cat_result.Final,
            "aspectlabels": CategoryAspectResult.ResultOptions,
            # "files": files,
            'rounding': settings.CATEGORY_GRADE_QUANTIZATION
        })
    else:
        raise PermissionDenied("This category does not exist.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_course_enrollment(self, student_id, course_id, course_section_id, term):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n UPDATE course_enrollments\n ...
[ "0.5649238", "0.5606058", "0.55875236", "0.55865836", "0.5551528", "0.5505576", "0.5498032", "0.54429334", "0.52882886", "0.5234603", "0.5189582", "0.51738596", "0.5154065", "0.51239836", "0.5069172", "0.50602853", "0.50570875", "0.5049785", "0.50334764", "0.5032367", "0.4990...
0.46072274
66
Explanation about grading and grade categories.
def about(request, pk=None):
    """Explanation page about grading and the grade categories.

    Type-3 staff may pass a timeslot pk to inspect another timeslot;
    everybody else always sees the current timeslot.
    """
    ts = get_object_or_404(TimeSlot, pk=pk) if pk and get_grouptype('3') in request.user.groups.all() else get_timeslot()
    context = {
        'scores': CategoryAspectResult.ResultOptions,
        "categories": GradeCategory.objects.filter(TimeSlot=ts),
        'ts': ts,
    }
    return render(request, "results/about_grades.html", context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_knowledge_category_terms(self):\n return # osid.grading.GradeQueryInspector", "def test_classify_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(5.00),\"A+\")", "def what_is_the_grade(self):\n\t\treturn_dict = {\n\t\t\t'section_title': self.title, \n\t\t\t'secti...
[ "0.625208", "0.6094908", "0.60138935", "0.5881098", "0.5708122", "0.5681825", "0.5661043", "0.56477875", "0.5588508", "0.5543916", "0.552775", "0.5490752", "0.5484218", "0.54239833", "0.5411527", "0.5353282", "0.53336465", "0.5323209", "0.5313484", "0.53094536", "0.5239492", ...
0.0
-1
List all aspects of a given grade category in the current timeslot
def list_aspects(request, pk):
    """List all aspects of a given grade category in the current timeslot."""
    category = get_object_or_404(GradeCategory, pk=pk)
    context = {
        "aspects": GradeCategoryAspect.objects.filter(Category=category),
        'ts': get_timeslot(),
        'cat': category,
    }
    return render(request, "results/list_aspects.html", context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_budget_analysis(cursor, plot_parameters, by_category=False):\n\n categories, out = {}, []\n for subject in plot_parameters[\"selected subjects\"]:\n out_cat, categories[subject] = [], {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n ...
[ "0.5233529", "0.5082282", "0.5039836", "0.49076796", "0.49012667", "0.4887644", "0.4880797", "0.48579165", "0.48460177", "0.4806707", "0.48065493", "0.47938785", "0.478363", "0.47725368", "0.47473636", "0.47454002", "0.47437078", "0.47360337", "0.47296265", "0.47234464", "0.4...
0.71396613
0
Show a list of timeslots to import grades from.
def copy(request, pk=None):
    """Copy grade categories (with their aspects) from another timeslot.

    Without ``pk``: list the timeslots that have grade categories to
    copy from. With ``pk``: after confirming via ConfirmForm, clone
    every GradeCategory of that timeslot (and its aspects) into the
    current timeslot. Copying is refused when the source is the current
    timeslot or the current timeslot already has categories.
    """
    # do a copy
    if pk:
        ts = get_object_or_404(TimeSlot, pk=pk)
        if ts == get_timeslot():
            raise PermissionDenied("It is not possible to copy the grades from the current timeslot.")
        # destination must be empty, otherwise the import is ambiguous
        if get_timeslot().gradecategories.exists():
            return render(request, 'base.html', {
                'Message': "The current timeslot already has grade categories."
                           " Importing is not possible. "
                           "Please remove the categories in the current timeslot before copying.",
                'return': 'results:list_categories'})
        if request.method == 'POST':
            form = ConfirmForm(request.POST)
            if form.is_valid():
                for cat in ts.gradecategories.all():
                    # NOTE(review): `old_id` is never used after this
                    # assignment -- candidate for removal
                    old_id = cat.id
                    old_aspects = cat.aspects.all()
                    # clearing the pk and saving clones the row into the
                    # current timeslot
                    cat.id = None
                    cat.TimeSlot = get_timeslot()
                    cat.save()
                    for aspect in old_aspects:
                        # clone each aspect and attach it to the new category
                        aspect.id = None
                        aspect.Category = cat
                        aspect.save()
                return render(request, 'base.html', {'Message': 'Finished importing!', 'return': 'results:list_categories'})
        else:
            form = ConfirmForm()
        # GET, or POST with an invalid form: show the confirmation form
        return render(request, 'GenericForm.html', {
            'form': form,
            'formtitle': 'Confirm copy grade categories and aspects',
            'buttontext': 'Confirm'
        })
    # list possible timeslots to copy from
    else:
        tss = TimeSlot.objects.filter(gradecategories__isnull=False).distinct()
        return render(request, "results/list_copy.html", {
            "tss": tss,
            'ts': get_timeslot(),
        })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_grades(state, from_dir):\n grading_manager = GradingManager(state.get_assignment(), from_dir)\n print_grades(grading_manager.grades(), state.user_name)", "def show_time(self):\n hour = str(datetime.datetime.now().strftime(\"%H\"))\n minute = str(datetime.datetime.now().strftime(\"%M\...
[ "0.5666389", "0.5338114", "0.5279143", "0.51949066", "0.51755744", "0.51437485", "0.51287425", "0.51153266", "0.51038504", "0.5092183", "0.5066873", "0.504833", "0.5036777", "0.5032361", "0.50242746", "0.5023967", "0.50215155", "0.4967114", "0.496651", "0.49573854", "0.493722...
0.0
-1
Error thrown when formatting a string but a config value is missing.
def __init__(self, key, parent=None):
    """Error thrown when formatting a string but a config value is missing.

    :param key: the missing config key.
    :param parent: optional key that was being rendered when the
        missing key was discovered; included in the message if given.
    """
    if parent:
        msg = f"Missing config while rendering {parent}: {key}"
    else:
        msg = f"Missing config: {key}"
    super(MissingConfiguration, self).__init__(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _raise_format_error(self, name: str, format_str: str, source_format: str):\n\n raise ValueError(f\"The '{ name }' should be { format_str }, rather than { source_format }\")", "def test_single_specifier_missing(self):\n template = 'missing'\n value_count = 1\n msg = 'The formatter ...
[ "0.6889008", "0.65927273", "0.6343142", "0.6240334", "0.60607684", "0.60268956", "0.5999315", "0.5850035", "0.58428705", "0.5835579", "0.5832608", "0.58293426", "0.58019817", "0.57879996", "0.5760329", "0.5742128", "0.57138693", "0.5656415", "0.5598235", "0.5593386", "0.55930...
0.0
-1
Overloaded to implement recursive lazy evaluation of properties.
def __getattribute__(self, key):
    """Recursive lazy evaluation of properties.

    Values are run through ``self.format`` on access, except for the
    ``reserved`` attribute itself and any key listed in it.
    """
    raw = super(Config, self).__getattribute__(key)
    if key == "reserved" or key in self.reserved:
        return raw
    return self.format(raw, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lazyprop(fn):\n\n @property\n def _lazyprop(self):\n if not hasattr(self, _LAZY_PROP_VALUES):\n setattr(self, _LAZY_PROP_VALUES, {})\n lazy_props_dict = self.__dict__[_LAZY_PROP_VALUES]\n if fn.__name__ not in lazy_props_dict:\n lazy_props_dict[fn.__name__] = fn...
[ "0.6558812", "0.62286186", "0.6222235", "0.6111081", "0.6004813", "0.5979281", "0.5608828", "0.5590664", "0.5550107", "0.5419193", "0.53930444", "0.5368933", "0.5345204", "0.53392524", "0.5152866", "0.5152002", "0.514644", "0.5144571", "0.51304764", "0.5085058", "0.50584644",...
0.0
-1
Add a child config
def add(self, key, child_config):
    """Attach a child config under *key* and point its root at self.

    The child is stored directly in ``__dict__`` so it bypasses the
    lazy-formatting attribute machinery.
    """
    child_config.root = self
    self.__dict__[key] = child_config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_config(self, parent_node, child_value):\n edge_cost = self.cost(parent_node.value, child_value)\n child_node = Node(\n child_value,\n parent=parent_node,\n cost=parent_node.cost + edge_cost,\n depth=parent_node.depth + 1\n )\n parent_n...
[ "0.7265803", "0.6487871", "0.64664084", "0.62878114", "0.6264923", "0.6200054", "0.6032241", "0.59954774", "0.5990614", "0.59605205", "0.59455884", "0.59455884", "0.591581", "0.5893812", "0.5794489", "0.5790334", "0.57764447", "0.5702759", "0.56942874", "0.5688074", "0.568399...
0.7865205
0
Format strings using CONFIG object. This method uses python's builtin `str.format()` method. All root properties in CONFIG are passed in as kwargs. The properties lazy evaluate and recursively expand.
def format(self, value, key=None, **kwargs):
    """Expand config placeholders in *value* using the config tree.

    Non-string values pass through untouched. When this config has a
    root, formatting is delegated to it so the full property path is
    available. Otherwise every ``{...}`` variable found in the string is
    resolved from the root properties (recursively expanding them),
    with explicit ``kwargs`` taking precedence.

    :raises MissingConfiguration: if a referenced root property does
        not exist.
    """
    if not isinstance(value, str):
        return value
    if self.root:
        # always format strings using the root so the full path is available
        return self.root.format(value, key, **kwargs)
    expanded = {}
    for variable in CONFIG_VARIABLE_PATTERN.findall(value):
        if variable in kwargs:
            continue
        root_key = variable.split(".")[0]
        try:
            root = self.root if self.root else self
            expanded[root_key] = self.format(getattr(root, root_key), variable, **kwargs)
        except AttributeError:
            raise MissingConfiguration(variable, key)
    expanded.update(**kwargs)
    return value.format(**expanded)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recursively_update_config(config, string_formatting_dict):\n\n for k in _iterate_list_or_dict(config):\n v = config[k]\n if isinstance(v, dict) or isinstance(v, list):\n recursively_update_config(v, string_formatting_dict)\n else:\n if _key_in_string(v, string_form...
[ "0.61522377", "0.61236", "0.60001415", "0.57900184", "0.57206047", "0.57040524", "0.5618517", "0.5617663", "0.56145966", "0.56098795", "0.5547251", "0.55374193", "0.5514143", "0.54919505", "0.5468669", "0.54580975", "0.54474247", "0.54033774", "0.5401085", "0.53975976", "0.53...
0.75447696
0
Directory where ixian is installed
def IXIAN(cls):
    """Directory where the ixian package is installed."""
    import ixian

    package_file = os.path.realpath(ixian.__file__)
    return os.path.dirname(package_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_teamocil_dir() -> pathlib.Path:\n return pathlib.Path(\"~/.teamocil/\").expanduser()", "def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph5/src\"", "def get_installdir(self):\n import mewlo\n path = os.path.dirname(os.path.realpath(mewlo.__fil...
[ "0.59720445", "0.5938821", "0.5927095", "0.59131306", "0.58979726", "0.586261", "0.5823516", "0.58234173", "0.5788261", "0.57610697", "0.5754324", "0.57422614", "0.5687996", "0.565429", "0.5623156", "0.56043833", "0.5588141", "0.5566707", "0.55413353", "0.55124944", "0.550046...
0.7647922
0
Directory where ixian was run from
def PWD(cls):
    """Directory where ixian was run from, as reported by the
    module-level ``pwd()`` helper."""
    working_dir = pwd()
    return working_dir
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IXIAN(cls):\n import ixian\n\n return os.path.dirname(os.path.realpath(ixian.__file__))", "def thisdir():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n # Ch...
[ "0.7374015", "0.6456549", "0.6438797", "0.64322007", "0.6419231", "0.6372672", "0.6354939", "0.635183", "0.6337567", "0.6308413", "0.6282552", "0.62738544", "0.62561786", "0.62519723", "0.62439907", "0.6243649", "0.62358207", "0.623114", "0.6230654", "0.6224367", "0.6190694",...
0.0
-1
Overloaded to implement recursive lazy evaluation of properties.
def __getattribute__(self, key):
    """Recursive lazy evaluation of properties.

    Attribute access is resolved against the task registry first: a
    lower-cased attribute name matching a registered task yields that
    task's ``TaskConfig``; anything else falls back to normal lookup.
    """
    from ixian.task import TASKS

    task_name = key.lower()
    if task_name not in TASKS:
        return super(TasksConfig, self).__getattribute__(key)
    return TaskConfig(TASKS[task_name])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lazyprop(fn):\n\n @property\n def _lazyprop(self):\n if not hasattr(self, _LAZY_PROP_VALUES):\n setattr(self, _LAZY_PROP_VALUES, {})\n lazy_props_dict = self.__dict__[_LAZY_PROP_VALUES]\n if fn.__name__ not in lazy_props_dict:\n lazy_props_dict[fn.__name__] = fn...
[ "0.6558812", "0.62286186", "0.6222235", "0.6111081", "0.6004813", "0.5979281", "0.5608828", "0.5590664", "0.5550107", "0.5419193", "0.53930444", "0.5368933", "0.5345204", "0.53392524", "0.5152866", "0.5152002", "0.514644", "0.5144571", "0.51304764", "0.5085058", "0.50584644",...
0.0
-1
Tutte le possibili permutazioni di una lista
def all_perms(elements): if len(elements) <=1: yield elements else: for perm in all_perms(elements[1:]): for i in range(len(elements)): # nb elements[0:1] works in both string and list contexts yield perm[:i] + elements[0:1] + perm[i:]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def permutations(lst):\n pass # Replace this with your implementation of the function.", "def permuta(L):\n return list(permuta_aux(L))", "def listDuplicate(self,permutations=True):\n ind,ok = self.testDuplicate(permutations)\n return ind[~ok]", "def unique():\r\n \r\n lista = main...
[ "0.6468891", "0.636794", "0.62925", "0.6233623", "0.61992514", "0.6176376", "0.61639607", "0.6095989", "0.60220444", "0.5991031", "0.59071577", "0.58907956", "0.5854479", "0.5828909", "0.57634", "0.57418424", "0.57263476", "0.5719343", "0.5716875", "0.56898516", "0.56706095",...
0.0
-1
Trova uno zero della funzione f tra i punti a e b, dove la f assume segno discorde. Il parametro opzionale toll indica la precisione con cui si vuole calcolare il valore dello zero
def bisezione(f,a,b,toll=10**-5): m = (a+b)/2 f_m = f(m) while abs(f_m) > toll: if f(a)*f_m < 0: b = m elif f(b)*f_m < 0: a = m elif f_m == 0: print("Trovata solzione esatta") return m else: print("Metodo fallito") return None m = (a+b)/2 f_m = f(m) return m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Calcular(a: float) ->float:\n \n return (a*2)", "def p() -> float:\n return 0.9", "def f(x0: float, x1: float) -> float:\n return 8 - (x0 - 2) ** 2 - (x1 - 2) ** 2", "def p2f (p):\n #return 11000**((p+1)/2)\n #return (p+1)*11000\n return (p+1)*5500", "def p2f(self):\n\n stale = self.m_...
[ "0.65174574", "0.6448639", "0.63864565", "0.6370828", "0.6296283", "0.6275515", "0.6270769", "0.6238707", "0.6191305", "0.61809313", "0.618049", "0.6167555", "0.6149057", "0.61406493", "0.61188513", "0.6118763", "0.61178863", "0.60342443", "0.6006515", "0.6004016", "0.6002765...
0.6965852
0
Incrementa la lista l intesa come numero (sequenza di cifre) contando in base b
def counter(l,b): if l == [b-1]*len(l): return i = 1 while(True): if l[-i] < b-1: l[-i] += 1 break else: l[-i] = 0 i += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def increment(b): \n if b == 11111111:\n return 00000000\n else:\n b = bin_to_dec(b)\n b = b + 1\n res = dec_to_bin (b)\n if len(res) == 8:\n return res\n else:\n c = 8 - len(res)\n return c*'0' + res", "def next_num():\r\n C...
[ "0.67676866", "0.62549233", "0.62549233", "0.6254061", "0.62319183", "0.6182245", "0.6147752", "0.608569", "0.60396475", "0.60262215", "0.5960267", "0.5946531", "0.5906772", "0.58969176", "0.58864105", "0.5877408", "0.58636934", "0.58469784", "0.5830072", "0.5778208", "0.5726...
0.62286186
5
combinations('ABCD', 2) > AB AC AD BC BD CD combinations(range(4), 3) > 012 013 023 123
def combinations(iterable, r): pool = tuple(iterable) n = len(pool) if r > n: return indices = list(range(r)) yield tuple(pool[i] for i in indices) while True: for i in reversed(range(r)): if indices[i] != i + n - r: break else: return indices[i] += 1 for j in range(i+1, r): indices[j] = indices[j-1] + 1 yield tuple(pool[i] for i in indices)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combinations(s, n):\n return (\"\".join(x) for x in tuples(s,n))", "def combinations(*comb, **kw):\n return _fixture_functions.combinations(*comb, **kw)", "def part_2():\n return itertools.permutations(range(5, 10))", "def get_combinations(text):\n combinations = []\n arr = []\n slen = ...
[ "0.718348", "0.7005977", "0.6993959", "0.6917465", "0.6894283", "0.68004334", "0.67992324", "0.6771768", "0.67690665", "0.67308486", "0.6717386", "0.6692334", "0.667273", "0.66375154", "0.6628557", "0.6606142", "0.6602187", "0.65536106", "0.65413445", "0.6535008", "0.6530812"...
0.5852057
65
Fornisce i divisori di n
def divisori(n): div=set() for i in range(1,int(n**0.5+1)): if n%i==0: div.add(int(n/i)) div.add(i) return sorted(div)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDivisors(n):", "def diviseur(n):\n s = 0\n for i in range (1, n):\n if n%i == 0:\n s += 1\n print(i)\n return \"Le nombre de diviseurs est\", s", "def divisior(n: int) -> list:\n j = [n]\n for d in range(n+1): #loop bis n\n d > 0", "def ge...
[ "0.8429677", "0.7996387", "0.78186643", "0.7653951", "0.75979793", "0.75530833", "0.74734193", "0.7455468", "0.74018323", "0.73676103", "0.73581624", "0.7303397", "0.7295638", "0.72913414", "0.72630566", "0.72216606", "0.7220337", "0.721529", "0.71952325", "0.71851534", "0.71...
0.73575246
11
Prime n cifre del numero num
def first_n_digits(num, n): return num // 10 ** (int(math.log(num, 10)) - n + 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isprime(n):\r\n\treturn is_prime(n)", "def comprobar_primo(num):\n primo = True\n for i in range(2, num):\n if num%i == 0:\n primo = False\n return primo", "def isprime(n):\n\treturn is_prime(n)", "def get_prime_factor(n):\n if n % 2 == 0:\n return 2\n for num in r...
[ "0.7530166", "0.7529172", "0.74141574", "0.7411982", "0.74033934", "0.7400811", "0.73903537", "0.7340318", "0.7339235", "0.7208471", "0.72076946", "0.71999794", "0.71911615", "0.7184746", "0.7175246", "0.71705663", "0.7161727", "0.71484905", "0.714309", "0.7138867", "0.713257...
0.0
-1
Verifica che un intero positivo sia un quadrato
def is_square(apositiveint): x = apositiveint // 2 seen = set([x]) while x * x != apositiveint: x = (x + (apositiveint // x)) // 2 if x in seen: return False seen.add(x) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def quadrant(xcoord, ycoord):\n\n xneg = bool(xcoord < 0)\n yneg = bool(ycoord < 0)\n if xneg is True:\n if yneg is False:\n return 2\n return 3\n if yneg is False:\n return 1\n return 4", "def _point_in_tri(self, pos, tri):\n signs = np.sign([np.cross(tri[np...
[ "0.7044399", "0.68214154", "0.67046845", "0.668237", "0.65902036", "0.6427196", "0.6391145", "0.637763", "0.6324196", "0.6229131", "0.6208887", "0.62083405", "0.61662257", "0.6166056", "0.61422956", "0.6140599", "0.6139746", "0.61388415", "0.6134873", "0.61307234", "0.611599"...
0.0
-1
Ultime n cifre del numero num
def last_n_digits(num, n): return num%(10**n)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def digit(number: int, n: int) -> int:\n return number // 10 ** n % 10", "def getNumber():", "def residuo_cero(numero):\n for x in range (1,10):\n if(numero % x == 0):\n return x \n return numero", "def CLng(num):\n return int(round(float(num)))", "def digito_verificacao(n...
[ "0.66533923", "0.6577843", "0.6550088", "0.6524935", "0.6478077", "0.6407476", "0.64042294", "0.63701874", "0.63546324", "0.6320073", "0.63194585", "0.63091135", "0.62533474", "0.6244983", "0.62103593", "0.61851996", "0.6173337", "0.61667633", "0.6162774", "0.6149299", "0.612...
0.62598634
12
Massimo comune denominatore tra a e b
def mcd(a, b): while(b != 0): a,b = b,a%b return a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def denom(self, a):\n return self.one", "def denom(self, a):\n raise NotImplementedError", "def ppcm_denominateurs(self):\n\t\tl = []\n\t\tn = 1\n\t\tif self.__valide:\n\t\t\tfor m in self.liste_decroissante():\n\t\t\t\t\"\"\" les denominateurs sont positifs \"\"\"\n\t\t\t\te = m.get_coefficient(...
[ "0.64670295", "0.63709563", "0.6305553", "0.6273303", "0.62383693", "0.62260216", "0.6183728", "0.61380094", "0.61126524", "0.61095035", "0.6071675", "0.6056668", "0.60076785", "0.5992859", "0.5985773", "0.5965909", "0.5937595", "0.5930666", "0.5926027", "0.59098005", "0.5906...
0.0
-1
Restituisce una lista con tutti i numeri primi fino a n compreso col metodo del crivello di Eratostene
def primi(n): numVec = [] for x in range(n-1): numVec.append(x+2) for num in numVec[:(n//2-1)]: if numVec[num-2] != 0: numVec[slice(2*num-2, n-1, num)] = [0]*(n//num-1) numVec = [x for x in numVec if x!=0] return numVec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mk_lst_atnum(self):\n\t\telem_rnge=[]\n\t\tfor i in self.atom_num_lst:\n\t\t\tel_strt=i[0]\n\t\t\tel_end=i[1]\n\t\t\trnge_sect=range(el_strt,el_end+1)\n\t\t\telem_rnge.extend(rnge_sect)\n\t\telements=[]\n\t\tfor i in elem_rnge:\n\t\t\telement=Element.from_Z(i)\t# Indice -> pymatgen element object\n\t\t\telemen...
[ "0.6289262", "0.62472546", "0.6225951", "0.61801213", "0.60700405", "0.59977514", "0.5939416", "0.5908262", "0.5887603", "0.585121", "0.5849568", "0.5843982", "0.5811617", "0.57871395", "0.57503945", "0.5743789", "0.57214135", "0.5715634", "0.56780475", "0.56768674", "0.56632...
0.0
-1
Restituisce il radicale di n
def radicale(n): r = 1 for p in primi(n+1): if p>n: break if n%p==0: r *= p n = n//p return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_wyraz(a1,nr_wyrazu,r):\n return a1+(nr_wyrazu-1)*r", "def Arn(r, n):\n ret = 1\n for t in range(n, n-r+1-1, -1):\n ret *= t\n return ret", "def _rnm(self, n, m, r):\n r_sum = 0\n m = int(abs(m))\n u = int((n-m)/2)\n v = int((n+m)/2)\n for s in range(0...
[ "0.6703785", "0.65736157", "0.6541312", "0.6525386", "0.6515335", "0.63054556", "0.6147716", "0.61371934", "0.61207694", "0.6074879", "0.60558325", "0.605559", "0.6001547", "0.59967726", "0.5936649", "0.5906539", "0.58998317", "0.5871071", "0.58641", "0.5848409", "0.58474976"...
0.68493146
0
Fornisce l'elemento successivo nella sequenza di Collatz
def collatz(n): if n%2==0: return n/2 else: return 3*n+1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def busca_sequencial_sentinela(lista, elemento):\n contador = 0\n lista.append(contador) \n try: \n while lista[contador] != elemento:\n contador += 1\n if contador == len(lista) - 1:\n del lista[-1]\n return -1\n \n del lista[-1]\n ...
[ "0.5769282", "0.5743955", "0.5429377", "0.53927815", "0.53786826", "0.5377348", "0.5373288", "0.5235085", "0.5226564", "0.5222602", "0.51664275", "0.5165773", "0.51485234", "0.51367635", "0.5131511", "0.5119195", "0.5055059", "0.5043993", "0.5040059", "0.5032709", "0.5030647"...
0.0
-1
Algoritmo di kruskal per la ricerca dell'MST di un grafo, fornito in tramite la sua matrice di adiacenza, usa la funzione ring_finder per cercare anelli nel grafo e di min_nonzero_idx per trovare gli inidici dei rami con costo minimo
def kruskal(m): n = m.shape[0] m_ret = np.zeros([n,n], dtype=int) while np.count_nonzero(m_ret) != 2*(n-1): i_min, j_min = min_nonzero_idx(m) n_min = m[i_min, j_min] m[i_min, j_min], m[j_min, i_min] = 0, 0 m_ret[i_min, j_min], m_ret[j_min, i_min] = n_min, n_min if ring_finder(m_ret, [i_min], []): m_ret[i_min, j_min], m_ret[j_min, i_min] = 0, 0 return m_ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kruskal(self):\n AGM = []\n i = j = 0\n \n self.grafo = sorted(self.grafo,key=lambda item:item[2])\n\n pai = []\n nivel = []\n\n for vertice in range(self.nVer):\n pai.append(vertice)\n nivel.append(0)\n\n while j < self.nVer-1:\n ...
[ "0.615799", "0.5902298", "0.58482414", "0.57372916", "0.57352954", "0.57080084", "0.56059325", "0.55901784", "0.5569681", "0.55468035", "0.5539385", "0.5525096", "0.5479864", "0.546164", "0.54158294", "0.5414072", "0.53997874", "0.53977036", "0.5389939", "0.5384784", "0.53628...
0.68557984
0
Implementation uses the MillerRabin Primality Test The optimal number of rounds for this test is 40
def miller_rabin(n,k): if n == 2: return True if n % 2 == 0: return False r, s = 0, n - 1 while s % 2 == 0: r += 1 s //= 2 for _ in range(k): a = random.randrange(2, n - 1) x = pow(a, s, n) if x == 1 or x == n - 1: continue for _ in range(r - 1): x = pow(x, 2, n) if x == n - 1: break else: return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rabin_miller_is_prime(n, k=100):\n\n def basic_is_prime(_n):\n \"\"\"Basic check to see if input is a prime.\n Returns False if input number is a composite with at least one term being one of the primes below 10.000.\n Returns True if the number is a prime (can only be known if it is in...
[ "0.761824", "0.7428767", "0.73708177", "0.73257506", "0.73257506", "0.7152389", "0.704289", "0.69547087", "0.69087857", "0.6902767", "0.69024384", "0.68879527", "0.6800044", "0.6790292", "0.6718005", "0.66617966", "0.65989155", "0.65562785", "0.6515376", "0.6470473", "0.64568...
0.7301876
5
Get current NFL season After March, returns year of upcoming season.
def current_season() -> int: now = datetime.now() month, year = now.month, now.year if month < 4: year -= 1 return year
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_season():\n td = datetime.datetime.today()\n if td.month > 8:\n return td.year\n return td.year - 1", "def latest_season_before(date):\n\tif date.month < 9:\n\t\treturn date.year - 1\n\treturn date.year", "def return_football_season(date=datetime.datetime.today()):\n date_aux = s...
[ "0.81641465", "0.7565352", "0.74286443", "0.6773904", "0.66487664", "0.6576127", "0.65571433", "0.6517643", "0.6495419", "0.64488924", "0.64429444", "0.64425147", "0.64404756", "0.6364527", "0.62375706", "0.61532784", "0.61332804", "0.6104992", "0.5971628", "0.5910558", "0.58...
0.8059089
1
Returns first Monday in September of given year
def _labor_day(year): day = datetime(year, 9, 1) delta = timedelta(days=1) while day.weekday() != 0: day += delta return day
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNu...
[ "0.74452096", "0.7358948", "0.71944577", "0.6750746", "0.66948074", "0.6441872", "0.63434887", "0.6304483", "0.6295698", "0.6277809", "0.62662905", "0.62631595", "0.6171451", "0.6164381", "0.6106714", "0.60991955", "0.60991955", "0.6070933", "0.60079587", "0.59685105", "0.593...
0.64837617
5
Get NFL week (ESPN scoring period) from date The year of the given date determines the relevant NFL season. Assumes week 1 begins the week of Labor Day and ends the following Wednesday. Does not cap value, so may be below 1 or above 17.
def get_week_from_date(date) -> int: month, year = date.month, date.year if month < 4: year -= 1 ld = _labor_day(year) wk1_wed = ld + timedelta(days=2) days_since = (date - wk1_wed).days weeks_since = days_since / 7. week = math.floor(weeks_since) + 1 return int(week)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_week(date):\n\n # TODO: the API seems broken. It returns week, year not year, week as documentef\n # why not use date.isocalendar() from the stdlib?\n\n date = date_trunc('week', date)\n\n first_monday = date_trunc('week', date_trunc('year', date))\n if first_monday.year < date.year:\n ...
[ "0.7253059", "0.6891157", "0.68909645", "0.6705168", "0.66147095", "0.6535417", "0.63276374", "0.6250827", "0.62457407", "0.61644757", "0.6102457", "0.60741466", "0.59655815", "0.59634364", "0.59619147", "0.5926681", "0.5889611", "0.5864661", "0.5860158", "0.5819684", "0.5810...
0.7307077
0
Get current NFL week (ESPN scoring period)
def current_week() -> int: now = datetime.now() return get_week_from_date(now)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_week(self):\n\n if not self.iso_equal() and self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 2\n if not self.iso_equal() or self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 1 \n return self.time_stamp_iso[1]", "def weekly():...
[ "0.7158167", "0.71226656", "0.7076009", "0.6954984", "0.6877098", "0.6862873", "0.6779078", "0.67491233", "0.66814524", "0.66672885", "0.6630186", "0.66270226", "0.642512", "0.6424101", "0.64128476", "0.6316389", "0.6287629", "0.62338805", "0.62046766", "0.61931086", "0.61917...
0.6960671
3
Find list of edl directories in all dependencies for the passed module
def get_edl_dirs(mod, gen_cfg): log.info("Fetching dependencies for %s", coordinates.as_path(mod.coords)) dependencies = mod.get_dependencies() edl_dirs = [mod.get_edl_path()] for dep, dep_coords in dependencies.items(): dep_cfg = gen_cfg.get_mod_cfg(dep) log.info("Dependency: %s", coordinates.as_path(dep_coords)) dep_edl_path = os.path.join(mod.mirror_root, coordinates.as_path(dep_coords, False)[1:], dep_coords.version, dep_cfg.edl_dir) edl_dirs.append(dep_edl_path) return edl_dirs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_module_search_path(self, pkg_name):\n pkg_location = self.get_package_location(pkg_name)\n module_search_path = [pkg_location, os.path.join(pkg_location,'lib')]\n st, cycle = graph.dfs(self.package_dependency, pkg_name)\n # computed packages on which this task depends\n ...
[ "0.65620816", "0.63926107", "0.6388437", "0.63874215", "0.6298563", "0.6170511", "0.6139222", "0.6134263", "0.6119241", "0.61146545", "0.608592", "0.6056361", "0.603119", "0.6020695", "0.5978215", "0.5966075", "0.5961092", "0.5923319", "0.5875171", "0.5873686", "0.58671385", ...
0.76784027
0
Update the symbol XML node
def edit_symbol_node(node, filename): size = int(re.findall('\d+', filename)[-1]) log.info('New filename %s; size %s', filename, size) node.set('typeId', SYMBOL_ID) node.find('name').text = 'DLS symbol' # Use PV name from rule in control PV for tooltip etc. # Reference that PV in rule to avoid duplication. pv_name = node.find('.//pv').text pv_element = et.Element('pv_name') pv_element.text = pv_name node.append(pv_element) node.find('.//pv').text = '$(pv_name)' rule_element = node.find('.//rule') rule_element.set('prop_id', 'image_index') rule_element.set('out_exp', 'true') file_element = et.Element('image_file') file_element.text = filename num_element = et.Element('symbol_number') num_element.text = '0' img_size_element = et.Element('sub_image_width') img_size_element.text = str(size) node.append(file_element) node.append(num_element) node.append(img_size_element) node.remove(node.find('opi_file'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_symbol(self, symbol):\r\n self.symbol = symbol", "def symbol(self, symbol):\n self._symbol = symbol", "def symbol(self, symbol):\n\n self._symbol = symbol", "def set_symbol(self, row, col, symbol):\n self.field[row, col] = symbol", "def setSymbolProps(self, name, symbol)...
[ "0.617891", "0.61759514", "0.61127055", "0.5838499", "0.56468177", "0.56205124", "0.5558976", "0.5554636", "0.5539334", "0.5484995", "0.54771584", "0.5455788", "0.54166734", "0.53877443", "0.5364725", "0.53485316", "0.5334409", "0.5323229", "0.53168017", "0.5301254", "0.51267...
0.65849906
0
Grep on the basepath to find all files that contain an EDM symbol widget. control
def build_filelist(basepath): log.info("Building list of files containing EDM symbols in %s", basepath) symbol_files = [] for dir_path, _, filenames in os.walk(basepath): for filename in filenames: filepath = os.path.join(dir_path, filename) if filename.endswith(".opi") and utils.grep(filepath, "EDM Symbol"): symbol_files.append(filepath) return symbol_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def searchfiles(pattern='C:\\\\RoboDK\\\\Library\\\\*.rdk'):\n import glob\n return glob.glob(pattern)", "def find_files(config, slot='*'):\n f_pattern = os.path.join(os.path.join(config['path'],config['led_name']), slot+'*' + config['led_name'] + '*'\n + config['current'] + ...
[ "0.6017925", "0.56027436", "0.5597905", "0.555413", "0.5544122", "0.54559", "0.5441576", "0.5366995", "0.53383917", "0.5280805", "0.5255213", "0.5246665", "0.5238796", "0.52089924", "0.5191325", "0.5161962", "0.51558715", "0.5154216", "0.51451254", "0.513593", "0.5125903", ...
0.60150164
1
Process one symbol file and convert to PNG.
def process_symbol(filename, mod, mod_cfg, mirror_root, prod_root): working_path = os.path.join(mirror_root, prod_root[1:]) log.debug("Finding version from %s", working_path) mod_version = utils.get_module_version(working_path, mod_cfg.area, mod, mod_cfg.version) log.info("Found version %s", mod_version) coords = coordinates.create(prod_root, mod_cfg.area, mod, mod_version) mirror_path = os.path.join(mirror_root, coordinates.as_path(coords)[1:]) full_path = os.path.join(mirror_path, mod_cfg.edl_dir, filename[:-3] + 'edl') destination = os.path.dirname(os.path.join(mirror_path, mod_cfg.opi_dir, filename)) log.info('Destination directory is {}'.format(destination)) if os.path.exists(destination): for f in os.listdir(destination): n = os.path.split(filename)[1] n = '.'.join(n.split('.')[:-1]) if f.startswith(n) and f.endswith('png'): log.info('Symbol png already exists: %s', f) return f else: log.warn('Failed to process symbol: %s does not exist', destination) return if os.path.exists(full_path): return files.convert_symbol(full_path, [destination]) else: log.warn('Symbol %s does not exist', full_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode():\r\n # Open the file with binary instructions\r\n with open(file_name) as file:\r\n lines = file.readlines()\r\n with open(PATH + file_name, \"w\") as file_write:\r\n for line in lines:\r\n file_write.write(line + \"\\n\")\r\n\r\n # Read the instruction...
[ "0.5763709", "0.55842566", "0.5484444", "0.54430854", "0.5442242", "0.5435863", "0.53775424", "0.53518033", "0.53421825", "0.53008115", "0.52987564", "0.5291246", "0.5290452", "0.5279297", "0.52617425", "0.5226984", "0.5217233", "0.52096176", "0.51862746", "0.51822567", "0.51...
0.61083233
0
calculate_angles(chunk) calculates elevation and azimuth given a jsonformatted chunk from ODAS
def calculate_angles(self,chunk): import math import collections Angles = collections.namedtuple("Angles", "ev az") x = float(chunk['x']) y = float(chunk['y']) z = float(chunk['z']) ev = round(90 - math.acos(z/math.sqrt(x*x+y*y+z*z))*180/math.pi) az = round(math.atan2(y,x)*180/math.pi) return(Angles(ev, az))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(all_blobs, all_angles):", "def extract_angles(self):\n atom_ids = self.contents['ID']\n angle_list = []\n for key, value in self.angles.items():\n a = value[0]\n b = value[1]\n c = value[2]\n\n lst = [a, b, c]\n\n A_ = np.asarr...
[ "0.56423277", "0.55796504", "0.5485317", "0.532156", "0.5300278", "0.50899595", "0.50719124", "0.50677234", "0.5040855", "0.5008711", "0.5001167", "0.50002366", "0.4993435", "0.49813247", "0.4962279", "0.49208593", "0.49165124", "0.49086887", "0.48858005", "0.48731053", "0.48...
0.7734531
0
Create a logger with console logging (info level) + log file (debug level).
def create_logger(log_dir): logger = logging.getLogger(__file__) logger.setLevel(logging.INFO) # file logger log_filename = "probabilist_connectogram_%s.log" % time.strftime("%Y-%m-%d_%H:%M:%S") if log_dir: log_path = os.path.join(log_dir, log_filename) else: log_path = log_filename file_handler = logging.FileHandler(log_path) formatter = logging.Formatter('%(asctime)s :: %(message)s') file_handler.setFormatter(formatter) file_handler.setLevel(logging.DEBUG) logger.addHandler(file_handler) # console logger console_handler = logging.StreamHandler() console_handler.setLevel(logging.DEBUG) console_handler.setFormatter(formatter) logger.addHandler(console_handler) logger.info("Log path: %s" % log_path) return logger
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_logger():\n logging.basicConfig(level = logging.INFO, filename='logging', filemode='w')\n logger = logging.getLogger(\" \")\n admin_handler = logging.FileHandler('logging')\n admin_handler.setLevel(logging.INFO)\n logger.addHandler(admin_handler)\n logger.warning(f'{admin_handler} crea...
[ "0.7268376", "0.70308805", "0.7029211", "0.7027607", "0.6950778", "0.6924888", "0.6906252", "0.6870268", "0.6851774", "0.68211913", "0.6813843", "0.6808933", "0.68085176", "0.6753689", "0.67525923", "0.6736932", "0.67316926", "0.67220473", "0.67183745", "0.6701308", "0.669111...
0.65178096
41
Create a command line argument parser, run it and return a dict mapping > .
def get_cmd_line_args(): usage = "%(prog)s <subject id> <nodif_brain> <bedpostx_dir> <outdir> [options]" parser = argparse.ArgumentParser(prog = "python probabilist_connectogram.py", usage = usage) # Required arguments parser.add_argument("subject_id", help="The name of the subject's folder in <SUBJECTS_DIR>.") parser.add_argument("nodif_brain", help="A preprocessed brain-only volume with bvalue=0.") parser.add_argument("bedpostx_dir", help="The bedpostx output directory for the subject's DWI data.") parser.add_argument("outdir", help="Directory where to output.") # Optional arguments parser.add_argument("--cortical-atlas", default="Desikan", choices=["Desikan", "Destrieux"], metavar="<atlas name>", help="Cortical atlas name, 'Desikan' (default) or 'Destrieux'") parser.add_argument("--remove-subcortical", action="store_true", help="Remove subcortical regions from the connectogram " "(Thalamus, Caudate, Putamen, Pallidum, Hippocampus, " "Amygdala, Accumbens-area and VentralDC).") parser.add_argument("--tracto-mask-type", default="nodif_brain", choices=TractoMaskTypes.choices, metavar="<tracto mask type>", help='The type of tractography mask to create, allowed types: ' '"nodif_brain" (default, whole brain), "wm", ' '"wm_dilated_1vox_6conn" or "wm_dilated_1vox_14conn". ' 'Two of the proposed white matter masks are dilated because a ' 'non-dilated white matter mask does not overlap with the "gray" ' 'subcortical regions, therefore the samples will never get there. ' 'Moreover the right and left white matter regions are much less ' 'connected without dilation, therefore the connectogram shows ' 'few interhemisphere connections with a simple white matter mask. 
' '"wm_dilated_1vox_6conn" means white matter dilated by 1 voxel ' 'based one a 6-connexity structuring element.') parser.add_argument("--nsamples", type=int, default=5000, metavar="<nsamples>", help="Number of samples per voxel to initiate in seed " "region (default 5000).") parser.add_argument("--nsteps", type=int, default=2000, metavar="<nsteps>", help="Maximum number of steps for a sample (default 2000).") parser.add_argument("--fs-subjects-dir", metavar="<Freesurfer subjects directory>", help="To bypass the $SUBJECTS_DIR environment variable.") # Create a dict of arguments to pass to the 'main' function args = parser.parse_args() kwargs = vars(args) # Adapt one argument to the 'main' interface kwargs["add_subcortical"] = not kwargs["remove_subcortical"] del kwargs["remove_subcortical"] return kwargs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main_parse_args():\n parser = ArgumentParser()\n parser.add_argument('infile', help='path to the file to be mapped.It should\\\n contain one identifer on each line.')\n parser.add_argument('-rh', '--redis_host', default=DEFAULT_REDIS_URL,\n help='url of Re...
[ "0.7854356", "0.7411294", "0.738185", "0.7369688", "0.725296", "0.72444576", "0.7242427", "0.7233522", "0.7119903", "0.7105029", "0.70896465", "0.7079773", "0.7079739", "0.7069191", "0.706641", "0.7060959", "0.7060555", "0.7030648", "0.70134765", "0.69964945", "0.6994354", ...
0.0
-1
What are the most popular three articles of all time?
def getPopularArticles(): db = psycopg2.connect("dbname=news") c = db.cursor() c.execute(" select count (*) as views, title from articles " + "left join " + "log on concat('/article/', articles.slug) = log.path " + "group by title order by views desc limit 3") views = c.fetchall() db.close() return views
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def most_popular_articles():\n\n results = query_database(QUERIES[0])\n print('\\nWhat are the most popular three articles of all time?\\n')\n for title, views in results:\n print(' * \"{}\" -- {} views'.format(title, views))", "def most_popular_articles():\n print '1. The most popular article...
[ "0.7968826", "0.77808386", "0.7766518", "0.77630246", "0.7703104", "0.7636034", "0.7507648", "0.7505851", "0.74948984", "0.7427754", "0.74145854", "0.71255904", "0.71188134", "0.7111447", "0.7081126", "0.7029817", "0.69994766", "0.6986983", "0.69304246", "0.6920505", "0.68825...
0.7005649
16
Who are the most popular article authors of all time?
def getPopualrAuthors(): db = psycopg2.connect("dbname=news") c = db.cursor() c.execute(" select count(*) as views , authors.name from articles " + " inner join " + "log on concat('/article/', articles.slug) = log.path " + " inner join authors on articles.author = authors.id " + "group by name order by views desc; ") authors = c.fetchall() db.close() return authors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_mostPopularAuthors():\n\n query = \"\"\"\n SELECT authors.name,COUNT(*) as views\n FROM articles join authors\n ON articles.author=authors.id\n JOIN log ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY authors.name\n ORDER BY views DE...
[ "0.8464116", "0.8395627", "0.8325832", "0.8321721", "0.81574434", "0.8025931", "0.7990663", "0.77915514", "0.778007", "0.76863235", "0.7682032", "0.7626295", "0.755735", "0.75209177", "0.74950725", "0.7390242", "0.7380563", "0.7352149", "0.73245054", "0.7269289", "0.7168644",...
0.67932266
28
Who are the most popular article authors of all time?
def getWorstDays(): db = psycopg2.connect("dbname=news") c = db.cursor() c.execute(" select c.* from" + "(select a.* , b.* , " + "(cast( b.total as decimal(16,4))/a.total)*100 as percent from" + " (select count(*) total , time::timestamp::date as timea " + "from log group by timea order by timea) as a, " + "(select count(*) total , time::timestamp::date as timea " + "from log where status <> '200 OK'" + "group by timea order by timea ) as b " + "where a.timea = b.timea) as c where c.percent > 1;") days = c.fetchall() db.close() return days
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_mostPopularAuthors():\n\n query = \"\"\"\n SELECT authors.name,COUNT(*) as views\n FROM articles join authors\n ON articles.author=authors.id\n JOIN log ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY authors.name\n ORDER BY views DE...
[ "0.84637415", "0.8395699", "0.8325273", "0.8321845", "0.8157658", "0.80273575", "0.7990549", "0.7792252", "0.77818847", "0.7686615", "0.7682264", "0.76261973", "0.75585544", "0.7522089", "0.7496455", "0.73912114", "0.7381132", "0.735024", "0.73232555", "0.7270634", "0.7169818...
0.0
-1
Returns an "absolute" value for a timedelta, always representing a time distance.
def abs_timedelta(delta): if delta.days < 0: now = datetime.datetime.now() return now - (now + delta) return delta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def abs_timedelta(delta):\r\n if delta.days < 0:\r\n now = _now()\r\n return now - (now + delta)\r\n return delta", "def delta(self, abs_value=False):\n return self.current - self.last if not abs_value else np.abs(self.current - self.last)", "def timedelta(self) -> datetime.timedelta...
[ "0.75997627", "0.6525403", "0.64160365", "0.6255736", "0.58985287", "0.58082616", "0.57936674", "0.573392", "0.569296", "0.56611925", "0.56431794", "0.56412184", "0.56401443", "0.5639377", "0.5544877", "0.5541361", "0.5524112", "0.5501755", "0.547358", "0.54723006", "0.545224...
0.7604547
0
Turn a value into a date and a timedelta which represents how long ago it was. If that's not possible, return (None, value).
def date_and_delta(value): now = datetime.datetime.now() if isinstance(value, datetime.datetime): date = value delta = now - value elif isinstance(value, datetime.timedelta): date = now - value delta = value else: try: value = int(value) delta = datetime.timedelta(seconds=value) date = now - delta except (ValueError, TypeError): return None, value return date, abs_timedelta(delta)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_and_delta(value):\r\n now = _now()\r\n if isinstance(value, datetime):\r\n date = value\r\n delta = now - value\r\n elif isinstance(value, timedelta):\r\n date = now - value\r\n delta = value\r\n else:\r\n try:\r\n value = int(value)\r\n ...
[ "0.77345294", "0.6244832", "0.6088585", "0.577737", "0.5701926", "0.56636906", "0.56362903", "0.559285", "0.5573411", "0.5541884", "0.5530724", "0.55193424", "0.55110997", "0.54977155", "0.54975235", "0.54929054", "0.5488044", "0.54846984", "0.545197", "0.5361616", "0.5360478...
0.7786376
0
Return the Hamming distance between equallength sequences
def __hamming_distance(s1, s2): if len(s1) != len(s2): raise ValueError("Undefined for sequences of unequal length") return sum(el1 != el2 for el1, el2 in zip(s1, s2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal lenght.\")\n return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))", "def hammingDistance(s1 = \"\", s2 = \"\"):\n # if len(s1) != len(s2):\n # raise ValueError(\"Undefined for sequen...
[ "0.7564989", "0.7524423", "0.7510748", "0.7498424", "0.73834527", "0.72914463", "0.7283258", "0.72027653", "0.719384", "0.7186409", "0.7184023", "0.7142286", "0.7130274", "0.71298635", "0.70760477", "0.70056623", "0.6964983", "0.69032145", "0.6894841", "0.68800247", "0.686782...
0.7553281
1
Implementation to serialize ``o`` argument.
def default(self, o: Decimal) -> Union[int, float]: if isinstance(o, Decimal): # NOTE: The below is potentially a HUGE MISTAKE and an # unnecessary OVER ENGINEERING! but this works. This is # not required as such because we can get around this by # converting everything to float by default but it makes # more sense to return response of ints as int and float as # float. return int(o) if float(o).is_integer() else float(o) return super().default(o)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize(self, obj):\n pass", "def serialize(obj):\n return serialization_manager.serialize(obj)", "def serialize(self, obj):\n return obj", "def serialize(obj):\n return pickle.dumps(obj)", "def serialize_forstorage(cls, obj):\n return misc.serialize_forstorage(obj)", "def ...
[ "0.76455027", "0.71864474", "0.71055853", "0.6807475", "0.6794227", "0.66385746", "0.66352147", "0.6566663", "0.65611637", "0.6546735", "0.6452253", "0.64283395", "0.63991266", "0.6386263", "0.63441694", "0.6334144", "0.6327756", "0.6327756", "0.6322545", "0.6322545", "0.6317...
0.0
-1
Fundamental pretrained Ernie model
def __init__(self, cfg, name=''): nn.Layer.__init__(self) self.cfg = cfg d_model = cfg['hidden_size'] d_emb = cfg.get('emb_size', cfg['hidden_size']) d_vocab = cfg['vocab_size'] d_pos = cfg['max_position_embeddings'] # d_sent = cfg.get("sent_type_vocab_size", 4) or cfg.get('type_vocab_size', 4) if cfg.get('sent_type_vocab_size'): d_sent = cfg['sent_type_vocab_size'] else: d_sent = cfg.get('type_vocab_size', 2) self.n_head = cfg['num_attention_heads'] self.return_additional_info = cfg.get('return_additional_info', False) self.initializer = nn.initializer.TruncatedNormal(std=cfg['initializer_range']) self.ln = _build_ln(d_model, name=append_name(name, 'pre_encoder')) self.word_emb = nn.Embedding(d_vocab, d_emb, weight_attr=paddle.ParamAttr(name=append_name(name, 'word_embedding'), initializer=self.initializer)) self.pos_emb = nn.Embedding(d_pos, d_emb, weight_attr=paddle.ParamAttr(name=append_name(name, 'pos_embedding'), initializer=self.initializer)) # self.sent_emb = nn.Embedding( # d_sent, # d_emb, # weight_attr=paddle.ParamAttr(name=append_name(name, 'sent_embedding'), initializer=self.initializer)) self._use_sent_id = cfg.get('use_sent_id', True) self._use_sent_id = False if self._use_sent_id: self.sent_emb = nn.Embedding(d_sent, d_emb, weight_attr=paddle.ParamAttr(name=append_name(name, 'sent_embedding'), initializer=self.initializer)) self._use_task_id = cfg.get('use_task_id', False) self._use_task_id = False if self._use_task_id: self._task_types = cfg.get('task_type_vocab_size', 3) logging.info('using task_id, #task_types:{}'.format(self._task_types)) self.task_emb = nn.Embedding(self._task_types, d_emb, weight_attr=paddle.ParamAttr(name=append_name(name, 'task_embedding'), initializer=self.initializer)) prob = cfg['hidden_dropout_prob'] self.dropout = nn.Dropout(p=prob) self.encoder_stack = ErnieEncoderStack(cfg, append_name(name, 'encoder')) if cfg.get('has_pooler', True): self.pooler = _build_linear(cfg['hidden_size'], cfg['hidden_size'], 
append_name(name, 'pooled_fc'), self.initializer) else: self.pooler = None self.key_tag = None self._checkpoints = [] self.train()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pretrained():\n launch_training_on_all_splits(experiment='full', splits=NAME_SPLIT, base_model='pretrained', dropout=0.7987, learning_rate=0.00009659)", "def load_pretrained_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model_ft = models....
[ "0.64619243", "0.63850045", "0.63135374", "0.6303723", "0.62750506", "0.62338805", "0.6216982", "0.6216971", "0.6207287", "0.61998534", "0.6164519", "0.6160022", "0.61489826", "0.6124723", "0.61036414", "0.60924315", "0.6061764", "0.60476965", "0.60332507", "0.60278857", "0.6...
0.0
-1
return checkpoints for recomputing
def get_checkpoints(self): # recompute checkpoints return self._checkpoints
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkpoint():", "def checkpoint_set():\n checkpoints.append(list())", "def get_all_overall_checkpoint(cls):\n return cls.create_all_overall_checkpoint()", "def get_checkpoint_list(cls):\n return cls.create_checkpoint_list()", "def checkpoint(self):\r\n return self._checkpoint", "d...
[ "0.744063", "0.70907867", "0.7052293", "0.69212705", "0.68812984", "0.664399", "0.66409785", "0.6579985", "0.6512506", "0.65115094", "0.65036887", "0.63288766", "0.624046", "0.6220209", "0.62004256", "0.61839545", "0.61744016", "0.6170368", "0.6168887", "0.61377853", "0.61085...
0.7678584
0
append name with postfix
def append_name(name, postfix): if name is None: ret = None elif name == '': ret = postfix else: ret = '%s_%s' % (name, postfix) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_name(name):\r\n\r\n\r\n return name + \"-apple\"", "def add_name(self, node):\n if 'name' in self.options:\n name = nodes.fully_normalize_name(self.options.pop('name'))\n if 'name' in node:\n del(node['name'])\n node['names'].append(name)\n ...
[ "0.6697877", "0.662184", "0.6614897", "0.6473923", "0.6254014", "0.6248267", "0.6226696", "0.6212746", "0.62123245", "0.60669625", "0.6044965", "0.6044965", "0.6044435", "0.60033596", "0.5903342", "0.5898558", "0.588601", "0.5874177", "0.5855147", "0.58351374", "0.5803622", ...
0.8279311
0
Lands the rover, and makes it part of the grid Throws an exception if A rover with that name already existed The rover being landed has a bad direction The rovers coordinates are off the grid A rover already exists on the gird at the rover's coordinates
def land_rover(self, rover): if self.rovers.get(rover.name): raise RoverException(ExceptionMessages.ROVER_ALREADY_LANDED) if not Rover.valid_direction(rover.direction): raise RoverException(ExceptionMessages.BAD_DIRECTION) if not self._is_coordinate_in_the_grid(rover.coordinate): raise RoverException(ExceptionMessages.OFF_GRID) if self._is_coordinate_occupied(rover.coordinate): raise RoverException(ExceptionMessages.ROVER_COLLISION) self.rovers[rover.name] = rover
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_move(self):\n self.owner = self.game.current_turn\n self.status = 'X' if self.owner == self.game.creator else 'O'\n ####\n #Random turn??\n ####\n self.save(update_fields=['status', 'owner'])\n\n # Add log entry for move\n self.game.add_log(f'cell ma...
[ "0.62105554", "0.620282", "0.59880394", "0.5978847", "0.5967129", "0.5945851", "0.5943394", "0.59243786", "0.5899155", "0.5898589", "0.5803447", "0.5776576", "0.5759452", "0.5755554", "0.5738934", "0.57336867", "0.5733618", "0.5726307", "0.57162607", "0.5699025", "0.5692675",...
0.68636113
0
Tries to navigate and reposition the rover on the gird. Throws an exception if It cannot find that rover on the grid A bad instruction is passed Executing the instruction string will cause a collision with another rover on the gird
def navigate_rover(self, name, instruction_str): rover = self.rovers.get(name) if not rover: raise RoverException(ExceptionMessages.BAD_NAME) coordinate = copy.deepcopy(rover.coordinate) direction = rover.direction for instruction in instruction_str: if instruction == 'L' or instruction == 'R': direction = self._direction_after_turning(direction, instruction) elif instruction == 'M': coordinate = self._coordinate_after_moving(direction, coordinate) else: raise RoverException(ExceptionMessages.INVALID_INSTRUCTION) # This means we have processed all the instructions without exception # assign new direction and coordinates to rover rover.direction = direction rover.coordinate = coordinate
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_rover(grid, start_at, instructions, name='rover'):\n plateu = None\n try:\n if isinstance(grid, str):\n x_end, y_end = grid.split(' ')\n x_end = int(x_end)\n y_end = int(y_end)\n plateu = Plateu(x_end, y_end, name)\n\n elif isinstance(grid...
[ "0.6312876", "0.60473263", "0.5894628", "0.5894628", "0.5836864", "0.5819794", "0.581422", "0.5812412", "0.5812412", "0.58097583", "0.58019125", "0.5792342", "0.57617646", "0.5753274", "0.57401913", "0.5738901", "0.5717236", "0.5713577", "0.57098556", "0.57092357", "0.5694141...
0.6905907
0
Basically a state machine Given a instruction('R' or 'L') and a direction('N' or 'S' or 'E' or 'W'), returns the new direction Throws an exception in case of bad instruction
def _direction_after_turning(self, direction, instruction): next_left_states = {'N':'W', 'W': 'S', 'S': 'E', 'E': 'N'} next_right_states = {'N': 'E', 'E': 'S', 'S': 'W', 'W': 'N'} if instruction == 'R': return next_right_states[direction] elif instruction == 'L': return next_left_states[direction] else: raise RoverException(ExceptionMessages.INVALID_INSTRUCTION)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def go_one_step(old_state, direction):\n assert direction in ['R', 'L', 'U', 'D']\n\n x, y = old_state\n if direction == 'R':\n return (x+1, y)\n if direction == 'L':\n return (x-1, y)\n if direction == 'U':\n return (x, y+1)\n if direction == 'D':\n return (x, y-1)", ...
[ "0.6593464", "0.62368506", "0.61913526", "0.5976613", "0.57964754", "0.5683937", "0.5672923", "0.5668834", "0.5567708", "0.5457504", "0.5439745", "0.5427397", "0.54177237", "0.53752434", "0.5332933", "0.53058296", "0.53014976", "0.52971196", "0.5290954", "0.5269749", "0.52675...
0.77884203
0
Returns a new coordinate after moving the rover, Based on the direction, it applies a movement of one grid and calculates the new coordinates. Its throws an exception if the new coordinate is off grid the new coordinate results in an collision with another rover
def _coordinate_after_moving(self, direction, coordinate): if direction == 'N': new_coordinate = Coordinate(coordinate.x, coordinate.y + 1) elif direction == 'S': new_coordinate = Coordinate(coordinate.x, coordinate.y - 1) elif direction == 'W': new_coordinate = Coordinate(coordinate.x - 1, coordinate.y) else: new_coordinate = Coordinate(coordinate.x + 1, coordinate.y) if not self._is_coordinate_in_the_grid(new_coordinate): raise RoverException(ExceptionMessages.OFF_GRID) if self._is_coordinate_occupied(new_coordinate): raise RoverException(ExceptionMessages.ROVER_COLLISION) return new_coordinate
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_move_location(self, direction):\n current_row = self._current_loc.get_row()\n current_column = self._current_loc.get_column()\n\n # Calculate the new location for a left move\n if (direction == \"l\"):\n return Location(current_row, current_column - 1)\n ...
[ "0.6662709", "0.6605978", "0.6600228", "0.6573474", "0.6549251", "0.65067387", "0.6477407", "0.6455114", "0.64177567", "0.6393052", "0.6361281", "0.6359388", "0.6349698", "0.63481045", "0.63376284", "0.63300747", "0.63202614", "0.6316886", "0.630494", "0.62923414", "0.6275278...
0.7444775
0
Return any binary tree that matches the given preorder and postorder traversals. Values in the traversals pre and post are distinct positive integers.
def constructFromPrePost(self, pre, post): if not pre and not post: return None root = TreeNode(pre[0]) if len(pre) == 1 and len(post) == 1: return root if pre[1] == post[-2]: lpre, lpost = pre[1:], post[:len(post)-1] ltree = self.constructFromPrePost(lpre, lpost) root.left = ltree else: lpre = pre[1:pre.index(post[-2])] lpost = post[:post.index(pre[1]) + 1] rpre = pre[pre.index(post[-2]):] rpost = post[post.index(pre[1])+1:-1] ltree = self.constructFromPrePost(lpre, lpost) rtree = self.constructFromPrePost(rpre, rpost) root.left, root.right = ltree, rtree return root
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildTree(self, inorder: 'List[int]', postorder: 'List[int]') -> 'TreeNode':\n self.post_index = len(postorder) - 1\n dict = {}\n for i, num in enumerate(inorder):\n dict[num] = i\n \n def helper(in_left, in_right):\n if in_left > in_right:\n ...
[ "0.6668872", "0.6449099", "0.6396742", "0.6088152", "0.60584265", "0.5841729", "0.57979167", "0.5787835", "0.5776373", "0.5711486", "0.55507797", "0.5525655", "0.5513274", "0.5498472", "0.54892987", "0.5478768", "0.54738367", "0.5418973", "0.53901404", "0.53882676", "0.536626...
0.66620654
1
Create and return an instance of the Isort plugin.
def setup_isort_tool_plugin(custom_rsc_path=None): arg_parser = argparse.ArgumentParser() if custom_rsc_path is not None: resources = Resources([custom_rsc_path]) else: resources = Resources( [os.path.join(os.path.dirname(statick_tool.__file__), "plugins")] ) config = Config(resources.get_file("config.yaml")) plugin_context = PluginContext(arg_parser.parse_args([]), resources, config) plugin_context.args.output_directory = os.path.dirname(__file__) itp = IsortToolPlugin() itp.set_plugin_context(plugin_context) return itp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sorter(Plugin):\n return Plugin.order", "def create_plugin(self, **kwargs):\n return self.plugin_class(**kwargs)", "def new(self, sort, properties=None):\n if sort is None:\n sort = UNKNOWNSORT\n # find next available vid\n vid, index = self.vid, self.index...
[ "0.5880008", "0.572811", "0.5263633", "0.5135936", "0.5086754", "0.5085847", "0.5052518", "0.5052518", "0.5028125", "0.5018764", "0.5011754", "0.49714696", "0.49462602", "0.48368236", "0.48352364", "0.4823109", "0.48152107", "0.48114508", "0.48113042", "0.479689", "0.47876537...
0.59617186
0
Test that the plugin manager can find the Isort plugin.
def test_isort_tool_plugin_found(): if sys.version_info.major == 3 and sys.version_info.minor < 6: pytest.skip("isort is only available for Python 3.6+, unable to test") manager = PluginManager() # Get the path to statick_tool/__init__.py, get the directory part, and # add 'plugins' to that to get the standard plugins dir manager.setPluginPlaces( [os.path.join(os.path.dirname(statick_tool.__file__), "plugins")] ) manager.setCategoriesFilter( { "Tool": ToolPlugin, } ) manager.collectPlugins() # Verify that a plugin's get_name() function returns "isort" assert any( plugin_info.plugin_object.get_name() == "isort" for plugin_info in manager.getPluginsOfCategory("Tool") ) # While we're at it, verify that a plugin is named Isort Tool Plugin assert any( plugin_info.name == "Isort Tool Plugin" for plugin_info in manager.getPluginsOfCategory("Tool") )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_plugins(self):\n from omtk import plugin_manager\n pm = plugin_manager.plugin_manager\n\n loaded_plugin_names = [plugin.cls.__name__ for plugin in pm.get_loaded_plugins_by_type('modules')]\n\n builtin_plugin_names = (\n 'Arm',\n 'FK',\n 'Additiv...
[ "0.66552377", "0.6554768", "0.6273206", "0.62442213", "0.6233943", "0.62220126", "0.60991585", "0.60933506", "0.6089562", "0.5941484", "0.59386533", "0.59242934", "0.5922501", "0.58955973", "0.58816534", "0.58793914", "0.58021533", "0.57796365", "0.57335883", "0.57034636", "0...
0.8355158
0
Verify that we can parse the normal output of isort.
def test_isort_tool_plugin_parse_valid(): itp = setup_isort_tool_plugin() total_output = [] output = "/tmp/x.py" total_output.append(output) output = "/tmp/y.py" total_output.append(output) issues = itp.parse_output(total_output) assert len(issues) == 2 assert issues[0].filename == "/tmp/x.py" assert issues[0].line_number == "0" assert issues[0].tool == "isort" assert issues[0].issue_type == "formatting" assert issues[0].severity == "3" assert issues[0].message == "Imports are incorrectly sorted and/or formatted." assert issues[1].filename == "/tmp/y.py"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_isort(self):\n chdir(REPO_ROOT)\n cmd = [\"isort\", \"-df\", \"-rc\", \"-c\", *SRC_DIRS]\n print(\"running:\", \" \".join(str(part) for part in cmd))\n proc = run(cmd, capture_output=True)\n assert proc.returncode == 0, f\"isort issues:\\n{proc.stdout.decode('utf-8')}\""...
[ "0.6781174", "0.6635181", "0.6309134", "0.62981886", "0.61402726", "0.6060493", "0.60508925", "0.59607303", "0.5936084", "0.59126383", "0.587762", "0.58522105", "0.5729688", "0.56102586", "0.5577316", "0.55231833", "0.5521064", "0.55136585", "0.55019987", "0.54842657", "0.548...
0.77473277
0
Test what happens when an OSError is raised (usually means isort doesn't exist).
def test_isort_tool_plugin_scan_oserror(mock_subprocess_check_output): mock_subprocess_check_output.side_effect = OSError("mocked error") itp = setup_isort_tool_plugin() package = Package( "valid_package", os.path.join(os.path.dirname(__file__), "valid_package") ) package["python_src"] = [ os.path.join(os.path.dirname(__file__), "valid_package", "sample.py") ] issues = itp.scan(package, "level") assert not issues
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def os_error():\n en = ctypes.get_errno()\n ctypes.set_errno(0)\n if en == 0:\n return OSError(en, \"(no errno found)\")\n else:\n return OSError(en, errno.errorcode[en])", "def test_dump_handles_os_error(mocker):\n\tmocker.patch('subprocess.Popen', side_effect=OSError('no such file'))\...
[ "0.7091949", "0.68408364", "0.6782386", "0.6650079", "0.66032916", "0.6333252", "0.6230165", "0.6217626", "0.61586803", "0.6140074", "0.60988116", "0.6090791", "0.6002094", "0.60008836", "0.598052", "0.5965969", "0.5965969", "0.59647274", "0.595298", "0.59336317", "0.59047925...
0.65513504
5
Test what happens when a CalledProcessError is raised (usually means isort hit an error).
def test_isort_tool_plugin_scan_calledprocesserror(mock_subprocess_check_output): mock_subprocess_check_output.side_effect = subprocess.CalledProcessError( 0, "", output="mocked error" ) itp = setup_isort_tool_plugin() package = Package( "valid_package", os.path.join(os.path.dirname(__file__), "valid_package") ) package["python_src"] = [ os.path.join(os.path.dirname(__file__), "valid_package", "sample.py") ] issues = itp.scan(package, "level") assert len(issues) == 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_make_tool_plugin_scan_calledprocesserror(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = subprocess.CalledProcessError(1, '', output=\"mocked error\")\n mtp = setup_make_tool_plugin()\n package = Package('valid_package', os.path.join(os.path.dirname(__file__),\n ...
[ "0.66911113", "0.64319515", "0.63943857", "0.63668126", "0.6338667", "0.63044673", "0.63034093", "0.6283694", "0.62171775", "0.6183645", "0.61580575", "0.6093192", "0.6079444", "0.603348", "0.6027173", "0.5997922", "0.59800065", "0.59751534", "0.59546334", "0.5954266", "0.595...
0.6745141
0
Get the release history from pypi Use the json API to get the release history from pypi. The returned json structure includes a 'releases' dictionary which has keys that are release numbers and the value is an array of uploaded files. While we don't have a 'release time' per say (only the upload time on each of the files), we'll consider the timestamp on the first source file found (which will be a .zip or tar.gz typically) to be 'release time'. This is inexact, but should be close enough for our purposes.
def get_releases_for_package(name, since): f = urlreq.urlopen("http://pypi.org/project/%s/json" % name) jsondata = f.read() data = json.loads(jsondata) releases = [] for relname, rellist in data['releases'].iteritems(): for rel in rellist: if rel['python_version'] == 'source': when = _parse_pypi_released(rel['upload_time']) # for speed, only care about when > since if when < since: continue releases.append( Release( name, relname, rel['filename'], when)) break return releases
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_releases():\n response = requests.get(PYPI_URL.format(package=PYPI_PACKAGE_NAME))\n if response:\n data = response.json()\n\n releases_dict = data.get('releases', {})\n\n if releases_dict:\n for version, release in releases_dict.items():\n release_forma...
[ "0.6942631", "0.66807014", "0.6496823", "0.6251065", "0.6223727", "0.61681485", "0.607848", "0.605974", "0.6022348", "0.5989354", "0.59570056", "0.59270537", "0.5916981", "0.58646035", "0.5862971", "0.5855767", "0.5854523", "0.5853826", "0.58354694", "0.5833912", "0.58016866"...
0.6868364
1
Calculates X values for given list of Y values in range defined by a and b parameters. X values are simply calculated by dividing given X range by number of nodes, so they are distributed in even range.
def prepare_initial_nodes(x_start, x_end, nodes_y): nodes_x = [float(x_start + ((x_end - x_start) / (len(nodes_y) - 1)) * i) for i in range(0, len(nodes_y))] nodes_y = [float(y) for y in nodes_y] print(nodes_x) print(nodes_y) nodes = list(zip(nodes_x, nodes_y)) return nodes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def projectionX(xdata, ydata, nbins, xrange=None, yrange=None):\n xmin, xmax = (np.min(xdata), np.max(xdata)) if xrange is None else xrange\n ymin, ymax = (np.min(ydata), np.max(ydata)) if yrange is None else yrange\n\n x_out = np.linspace(xmin, xmax, nbins+1)\n y_out = np.empty(nbins)\n dx = np.dif...
[ "0.58776325", "0.5662537", "0.5642036", "0.5495832", "0.5469611", "0.5420076", "0.5375376", "0.53669316", "0.53453207", "0.53439814", "0.53289247", "0.5299425", "0.5295588", "0.52585924", "0.52468276", "0.5246785", "0.5245637", "0.5245567", "0.52296543", "0.5221779", "0.52137...
0.58207065
1
Takes list of divided differences nodes and calculates new divided differences node from each pair of nodes_to_compute. In other words, it computes next level of so called Newton's second interpolation form tree.
def calculate_divided_differences_row(nodes_to_compute): divided_differences = [] if len(nodes_to_compute) == 1: return None for i in range(0, len(nodes_to_compute) - 1): child = DividedDifferenceNode.create_child_node(nodes_to_compute[i], nodes_to_compute[i + 1]) child.calculate_value() divided_differences.append(child) for node in divided_differences: print(node, end='') print('\n') return divided_differences
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_divided_differences(nodes):\n nodes_to_compute = []\n divided_differences = []\n for node in nodes:\n nodes_to_compute.append(DividedDifferenceNode(x=node[0], divided_difference=node[1]))\n\n divided_differences.append(tuple(nodes_to_compute))\n\n while len(nodes_to_compute) > 1...
[ "0.65839136", "0.57315016", "0.567347", "0.55890775", "0.55738044", "0.5463602", "0.54403263", "0.5439998", "0.5405277", "0.53822875", "0.5344752", "0.5334331", "0.5325484", "0.5318365", "0.53122985", "0.53017414", "0.5295547", "0.5275849", "0.52737594", "0.52574426", "0.5230...
0.6325345
1
Calculates divided differences for given interpolation nodes. It is assumed, that at least two interpolation nodes are provided. Each tuple of returned list represents one level of divided differences tree.
def calculate_divided_differences(nodes): nodes_to_compute = [] divided_differences = [] for node in nodes: nodes_to_compute.append(DividedDifferenceNode(x=node[0], divided_difference=node[1])) divided_differences.append(tuple(nodes_to_compute)) while len(nodes_to_compute) > 1: next_node_row = calculate_divided_differences_row(nodes_to_compute) divided_differences.append(tuple(next_node_row)) nodes_to_compute = next_node_row return divided_differences
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_divided_differences_row(nodes_to_compute):\n divided_differences = []\n\n if len(nodes_to_compute) == 1:\n return None\n\n for i in range(0, len(nodes_to_compute) - 1):\n child = DividedDifferenceNode.create_child_node(nodes_to_compute[i], nodes_to_compute[i + 1])\n chil...
[ "0.7081299", "0.526944", "0.52446645", "0.5239564", "0.523342", "0.51974493", "0.51963425", "0.5140926", "0.50798607", "0.5076724", "0.50607145", "0.5054353", "0.5047319", "0.49978474", "0.49799216", "0.49528977", "0.49478018", "0.49372533", "0.49131808", "0.490018", "0.48992...
0.77134573
0
Creates polynomial from given list of divided differences. Polynomial string is created according to equation provided in project docs.
def calculate_newton_interpolation(divided_differences): polynomial = [] for i, divided_differences_row in enumerate(divided_differences): polynomial_part = '({0})'.format(divided_differences_row[0].divided_difference) for j in range(0, i): polynomial_part += '*(x-{0})'.format(divided_differences[0][j].x) polynomial_part += '+' polynomial.append(polynomial_part) polynomial_str = ''.join(polynomial)[:-1] print('Calculated polynomial: {0}'.format(polynomial_str)) # Heuristic simplification of calculated polynomial simplified_polynomial = sy.simplify(polynomial_str) print("Simplified polynomial: {0}".format(simplified_polynomial)) return simplified_polynomial
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_to_poly(polynomial_list):\n max_degree = len(polynomial_list) - 1\n strings = []\n opts = ['x', '']\n for index, num in enumerate(polynomial_list):\n if num == 0:\n continue\n if index < max_degree - 1:\n string = '{}x^{}'.format(num, max_degree - index)\n ...
[ "0.6846461", "0.65011793", "0.6292352", "0.6274255", "0.62225056", "0.61097145", "0.60969347", "0.6090148", "0.60779357", "0.60200155", "0.60196537", "0.59783614", "0.5969226", "0.5965326", "0.5856345", "0.5849811", "0.58080703", "0.5775723", "0.5735005", "0.5733751", "0.5722...
0.70788777
0