query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Split string by upper case letters. F.e. useful to convert camel case strings to underscore separated ones. words (list)
def camel_case_to_lower_case_underscore(string): words = [] from_char_position = 0 for current_char_position, char in enumerate(string): if char.isupper() and from_char_position < current_char_position: words.append( string[from_char_position:current_char_position].lower()) from_char_position = current_char_position words.append(string[from_char_position:].lower()) return '_'.join(words)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_uppercase(word):\r\n final_word = ''\r\n for i in word:\r\n final_word += ' %s' % i if i.isupper() else i\r\n\r\n return final_word.strip()", "def camelCaseSplit(text):\n matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text)\n return [m.group(0) for ...
[ "0.7751512", "0.7259431", "0.7180602", "0.7063711", "0.70595056", "0.70409745", "0.69426405", "0.6801742", "0.67717946", "0.6744924", "0.67325515", "0.6694427", "0.6669808", "0.6653575", "0.6632303", "0.66086304", "0.66041285", "0.66029406", "0.65299785", "0.65299785", "0.649...
0.63527435
28
Convert string or unicode from lowercase underscore to camelcase
def lower_case_underscore_to_camel_case(string): splitted_string = string.split('_') # use string's class to work on the string to keep its type class_ = string.__class__ return class_.join('', map(class_.capitalize, splitted_string))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_underscore_to_camel_case(text):\n words = text.split('_')\n words = [word.capitalize() for word in words]\n return ''.join(words)", "def underscore_to_camelcase(text):\n text = text.replace('_', ' ').title()\n return text.replace(' ', '')", "def underscore_to_camelcase(underscore_str...
[ "0.82202953", "0.8198104", "0.81893754", "0.7956747", "0.79494596", "0.787746", "0.7843858", "0.7833112", "0.7820128", "0.77906346", "0.77414733", "0.770459", "0.77041924", "0.76800525", "0.7620144", "0.75948817", "0.7570997", "0.7570997", "0.7565195", "0.7542623", "0.7537749...
0.7864236
6
Rolls a dice and returns the number
def diceRoll(): return randint(1,6)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def roll_dice():\n return (random.randint(1, 6) + random.randint(1, 6))", "def roll_dice():\n roll = random.randint(1, 6)\n return roll", "def simple_roll(dice):\n return roll(dice).total", "def roll_dice(self):\n self.roll = (random.randint(1,6), random.randint(1,6))\n return self....
[ "0.8223234", "0.8189671", "0.80983704", "0.8092796", "0.79572994", "0.78616965", "0.7784601", "0.77547574", "0.7703866", "0.7699241", "0.7689161", "0.7647644", "0.76401645", "0.76285684", "0.75962", "0.7539634", "0.75269604", "0.7526193", "0.7524304", "0.751236", "0.75110435"...
0.78609216
6
Rolls the double dice, checks that they equal six and returns the result
def generateNumbers(times): two_occured = 0 got_six = 0 numbers = {} for i in range(times): roll_one = diceRoll() roll_two = diceRoll() total = roll_one + roll_two if total == 6: # Don't care about others (for now, could use it to check how often you'd roll six I guess ?) got_six = got_six + 1 # Add the numbers to return dict if roll_one in numbers: numbers[roll_one] = numbers[roll_one] + 1 else: numbers[roll_one] = 1 if roll_two in numbers: numbers[roll_two] = numbers[roll_two] + 1 else: numbers[roll_two] = 1 if roll_one == 2 or roll_two == 2: two_occured = two_occured + 1 percent = (two_occured / got_six) * 100 return (percent, numbers, got_six)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def roll_dice(check_double=True):\n\n roll = np.random.choice(np.arange(1, 7), 2)\n\n if check_double:\n return roll.sum(), roll[0] == roll[1]\n else:\n return roll.sum()", "def roll_dice():\n return (random.randint(1, 6) + random.randint(1, 6))", "def roll_dice(num_rolls, dice=six_si...
[ "0.76994514", "0.76561534", "0.73893756", "0.73837674", "0.72581154", "0.7254027", "0.72445756", "0.7230524", "0.7154722", "0.7104546", "0.70965785", "0.70899475", "0.7055198", "0.70499194", "0.70469195", "0.70377696", "0.70235765", "0.69848144", "0.6969932", "0.69600403", "0...
0.0
-1
Create a new CoauthorshipEgoNetwork by specifying the id and the name of the ego.
def __init__(self, ego_name: str, ego_id: int = None, last_time: int = 0, min_pub_date: int = None): self.ego_name = ego_name self.publications = {} self.min_pub_date = min_pub_date super(CoauthorshipNamedEgoNetwork, self).__init__(ego_id, last_time)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def network_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_network(**kwargs)", "def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Creat...
[ "0.6290861", "0.6215418", "0.6022672", "0.6000608", "0.5971473", "0.5934701", "0.58295625", "0.5809999", "0.56568396", "0.56159425", "0.56093043", "0.54925305", "0.5469227", "0.5450064", "0.53767145", "0.5361078", "0.5341799", "0.5335839", "0.53156966", "0.53053945", "0.53020...
0.5981479
4
Add a contact representing a publication coauthored by the ego to the ego network.
def add_publication(self, pub_id: int, timestamp: int, title: str, coauthors: List[str], contact_type="__all__"): if self.min_pub_date is None or timestamp >= self.min_pub_date: # standardize names, remove possible duplicates and wrong names, and remove ego if present std_coauth_names = set() for coauthor_name in coauthors: if len(coauthor_name) > 1: std_coauth_names.add(self.get_std_author_name(coauthor_name)) std_coauth_names.discard(self.get_std_author_name(self.ego_name)) for coauthor_name in std_coauth_names: if self.last_time is None or timestamp > self.last_time: self.last_time = timestamp super(CoauthorshipNamedEgoNetwork, self).add_contact(timestamp=timestamp, alter_id=coauthor_name, contact_type=contact_type, text=title, num_contacted_alters=len(std_coauth_names)) self.publications[pub_id] = (title, std_coauth_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AddContact(self, contact):\n\t\tcontact.group_membership_info = [gdata.contacts.data.GroupMembershipInfo(href=self.GetFirstGroupId())]\n\t\ttry:\n\t\t\tself.client.CreateContact(contact)\n\t\texcept gdata.client.RequestError:\n\t\t\tpass", "def add_contact(self, contact):\n\t\tclient_log.debug(f'Создание кон...
[ "0.64493227", "0.6373825", "0.62784225", "0.6231083", "0.60652024", "0.59836507", "0.59490603", "0.579805", "0.5783082", "0.5768027", "0.5641641", "0.5640455", "0.56229985", "0.56229985", "0.5609489", "0.55984056", "0.54995424", "0.54777515", "0.5452757", "0.5432362", "0.5429...
0.6801305
0
Construct a CallCount instance.
def __init__(self, function): self.function = function self.count = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(self) -> CountRequestBuilder:\n from .count.count_request_builder import CountRequestBuilder\n\n return CountRequestBuilder(self.request_adapter, self.path_parameters)", "def count(self) -> CountRequestBuilder:\n from .count.count_request_builder import CountRequestBuilder\n\n ...
[ "0.58695185", "0.58695185", "0.5629136", "0.5570181", "0.5538394", "0.552469", "0.541156", "0.5403923", "0.5349386", "0.5247626", "0.5242342", "0.5222286", "0.5211293", "0.51988417", "0.5185769", "0.51804847", "0.51790524", "0.5139803", "0.5135964", "0.51276577", "0.5105392",...
0.47872669
47
Reset the function call count to zero.
def reset_count(self): self.count = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_counter(self) -> None:", "def reset_calls(self) -> None:\n self.logger.info(\"Reset calls\")\n\n self._has_bob = False\n self._has_single = False", "def reset (self):\n self.counter = 0", "def reset(self):\n self.counter = 0", "def reset(self):\n # self.c...
[ "0.7342854", "0.7133632", "0.7101041", "0.7076571", "0.6986588", "0.6887243", "0.68343556", "0.6833832", "0.6802058", "0.6802058", "0.68014395", "0.68014395", "0.68014395", "0.6790398", "0.67423177", "0.6730629", "0.6721631", "0.6674541", "0.6674541", "0.6674541", "0.6646674"...
0.7426931
0
Compare two values by their natural ordering.
def compare(a, b): return a - b
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def natural_sort_comparison(value1, value2):\n return cmp(_natural_sort_key(value1), _natural_sort_key(value2))", "def natural_sort_case_insensitive_comparison(value1, value2):\n return natural_sort_comparison(value1.lower(), value2.lower())", "def cmp ( self, object1, object2 ):\n return cmp( sel...
[ "0.8129934", "0.713771", "0.68130374", "0.67042726", "0.6640327", "0.66176546", "0.6604838", "0.65097326", "0.6501352", "0.6492879", "0.6490977", "0.64719254", "0.64077866", "0.63938427", "0.6344175", "0.6319825", "0.62944037", "0.6267962", "0.6250287", "0.6224336", "0.621590...
0.60554993
32
Test the average execution time of a given function.
def time_function(function, runs=1, average=min): results = [None] * runs for i in range(runs): t0 = time.perf_counter() function() t1 = time.perf_counter() results[i] = t1 - t0 return average(results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculateRunTime(function, *args):\n startTime = time.time()\n result = function(*args)\n return time.time() - startTime, result", "def execution_time(function: Callable, args=tuple(), kwargs=dict()):\n start_time = time.time()\n function(*args, **kwargs)\n end_time = time.time()\n retur...
[ "0.72246695", "0.7189616", "0.69638824", "0.69256145", "0.68879926", "0.6848878", "0.6752755", "0.6739705", "0.67355597", "0.6723226", "0.67124665", "0.6660492", "0.66482943", "0.6646599", "0.66432923", "0.66369736", "0.66163874", "0.6569194", "0.6540607", "0.6531069", "0.652...
0.73044527
0
Test the total execution time of a given function.
def time_function_total(function, runs=1): t0 = time.perf_counter() for _ in range(runs): function() t1 = time.perf_counter() return t1 - t0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execution_time(function: Callable, args=tuple(), kwargs=dict()):\n start_time = time.time()\n function(*args, **kwargs)\n end_time = time.time()\n return end_time - start_time", "def calculateRunTime(function, *args):\n startTime = time.time()\n result = function(*args)\n return time.tim...
[ "0.78346467", "0.7678804", "0.76649004", "0.75340277", "0.73253274", "0.7304831", "0.72743374", "0.72719234", "0.7266114", "0.7256505", "0.7253224", "0.7231037", "0.71410745", "0.71373457", "0.71289027", "0.71013814", "0.7084868", "0.70669544", "0.706348", "0.7050488", "0.704...
0.7101723
15
Query if a value is in an array via iterative linear search.
def linear_search_iterative(array, value): for elt in array: if compare(elt, value) == 0: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linear_search_foundornot(arr: IntList, query: int) -> bool:\n position: int = 0\n found: bool = False\n while position < len(arr) and not found:\n if arr[position] == query:\n found = True\n position += 1\n return found", "def linear_search_recursive(array, value):\n #...
[ "0.7187239", "0.7014195", "0.68994564", "0.68296456", "0.6682872", "0.65983593", "0.6571656", "0.65281767", "0.6450205", "0.6415358", "0.6367779", "0.6351479", "0.6338945", "0.6338945", "0.6338945", "0.6338945", "0.63235927", "0.6252645", "0.6220145", "0.6189393", "0.6184134"...
0.7844321
0
Query if a value is in an array via recursive linear search.
def linear_search_recursive(array, value): # Base case for empty list n = len(array) if n == 0: return False # Recursive case if compare(array[0], value) == 0: return True else: return linear_search_recursive(array[1:], value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binary_search_recursive(array, value):\n # Base cases for empty or singular list\n n = len(array)\n if n == 0:\n return False\n elif n == 1:\n return compare(array[0], value) == 0\n\n # Recursive case\n middle = n // 2\n if compare(array[middle], value) == 0:\n return ...
[ "0.69967544", "0.69195235", "0.6862056", "0.67465794", "0.65924174", "0.65537065", "0.64034545", "0.6274502", "0.6266495", "0.62526584", "0.6248882", "0.6248882", "0.6248882", "0.6248882", "0.6202555", "0.6176431", "0.6166238", "0.6148709", "0.60954624", "0.6085641", "0.60850...
0.7844012
0
Query if a value is in an array via recursive binary search.
def binary_search_recursive(array, value): # Base cases for empty or singular list n = len(array) if n == 0: return False elif n == 1: return compare(array[0], value) == 0 # Recursive case middle = n // 2 if compare(array[middle], value) == 0: return True elif compare(array[middle], value) < 0: return binary_search_recursive(array[middle + 1:], value) else: return binary_search_recursive(array[:middle], value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linear_search_recursive(array, value):\n # Base case for empty list\n n = len(array)\n if n == 0:\n return False\n\n # Recursive case\n if compare(array[0], value) == 0:\n return True\n else:\n return linear_search_recursive(array[1:], value)", "def Search(array, value)...
[ "0.75950164", "0.72089994", "0.6862112", "0.6780811", "0.6759658", "0.6693581", "0.6673692", "0.6622626", "0.6622626", "0.6622626", "0.66219646", "0.66128653", "0.65874904", "0.65533173", "0.65308404", "0.6481852", "0.6433637", "0.64171046", "0.64025944", "0.63988364", "0.629...
0.7462939
1
Query if a value is in an array via iterative binary search.
def binary_search_iterative(array, value): # Iteration terminates when (min, max) range has shrunk such that min > max min = 0 max = len(array) - 1 while min <= max: middle = (min + max) // 2 comparison = compare(array[middle], value) if comparison == 0: return True elif comparison < 0: min = middle + 1 else: max = middle - 1 return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linear_search_iterative(array, value):\n for elt in array:\n if compare(elt, value) == 0:\n return True\n\n return False", "def Search(array, value):\n left = 0\n right = len(array) - 1\n while left <= right:\n mid = left + (right - left) // 2\n if array[mid] ==...
[ "0.741259", "0.7237241", "0.69422096", "0.692457", "0.682915", "0.6787418", "0.6777182", "0.6744018", "0.67439973", "0.6704923", "0.6696993", "0.669297", "0.6692132", "0.6692132", "0.6692132", "0.66770345", "0.6598662", "0.6594714", "0.6561431", "0.6525377", "0.65189373", "...
0.7150931
2
Sort a list via selection sort.
def selection_sort(array): n = len(array) result = array.copy() for i in range(n - 1): # Find next-smallest value smallest = i for j in range(i + 1, n): if compare(result[j], result[smallest]) < 0: smallest = j # Swap next-smallest value into position if i != smallest: result[i], result[smallest] = result[smallest], result[i] return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def selection_sort(lst):\n l = len(lst)\n for i in range(l - 1):\n pos = i\n for j in range(i + 1, l):\n if lst[j] < lst[pos]:\n pos = j\n\n if pos > i:\n lst[i], lst[pos] = lst[pos], lst[i]\n\n return lst", "def selection_sort(unsorted_list):\r\...
[ "0.7748435", "0.7590971", "0.75599235", "0.7533883", "0.7526575", "0.7514822", "0.7492142", "0.74519706", "0.7430942", "0.74007577", "0.72966677", "0.7260174", "0.7220203", "0.7211774", "0.72063357", "0.7201985", "0.7201396", "0.7140281", "0.71077573", "0.70871216", "0.705984...
0.6030952
84
Sort a list via recursive selection sort.
def selection_sort_recursive(array): # Base case for empty or singular list n = len(array) if n < 2: return array # Find smallest value result = array.copy() smallest = 0 for i in range(1, n): if compare(result[i], result[smallest]) < 0: smallest = i # Swap smallest into first position if smallest != 0: result[0], result[smallest] = result[smallest], result[0] # Recur on remainder of array return [result[0]] + selection_sort_recursive(result[1:])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def selection_sort(l):\n walk = 0\n while walk < len(l):\n i = walk\n while i < len(l):\n if l[i] < l[walk]:\n # swap i and walk\n tmp = l[walk]\n l[walk] = l[i]\n l[i] = tmp\n i += 1\n walk += 1\n retur...
[ "0.7316072", "0.7191435", "0.7096822", "0.6961271", "0.690172", "0.6899829", "0.689302", "0.68900025", "0.68848956", "0.68338776", "0.6833445", "0.68095154", "0.6795468", "0.6794519", "0.67531425", "0.6728902", "0.66321385", "0.6614806", "0.6598474", "0.6597345", "0.6581996",...
0.681675
11
Sort a list via insertion sort.
def insertion_sort(array): n = len(array) result = array.copy() # Swap each value backwards until in correct position for i in range(1, n): j = i while j > 0 and compare(result[j], result[j - 1]) < 0: result[j], result[j - 1] = result[j - 1], result[j] j -= 1 return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insertionSort(list):", "def insertion_sort(mylist):\n for i in range(1, len(mylist)):\n x = mylist[i]\n j = i-1\n while j >= 0 and mylist[j] > x:\n mylist[j+1] = mylist[j]\n j -= 1\n mylist[j+1] = x", "def insertion_sort(student_list):\n length = ...
[ "0.9008579", "0.81677395", "0.81517905", "0.815131", "0.8149608", "0.8100819", "0.806909", "0.8044392", "0.8003723", "0.7992259", "0.789808", "0.788748", "0.78571534", "0.7837437", "0.7747174", "0.7729622", "0.7676159", "0.767438", "0.7664585", "0.7653782", "0.7650872", "0....
0.66808194
60
Sort a list via recursive insertion sort.
def insertion_sort_recursive(array): # Base case for empty or singular list n = len(array) if n < 2: return array # Recursive case is last element to insert appended to sorted sub-list result = insertion_sort_recursive(array[:-1]) + [array[-1]] # Swap last value backwards until in correct position i = n - 1 while i > 0 and compare(result[i], result[i - 1]) < 0: result[i], result[i - 1] = result[i - 1], result[i] i -= 1 return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insertionSort(list):", "def insertion_sort(p_list):\n if len(p_list) > 1: # list of length 0 or 1 is sorted\n marker = p_list.first()\n while marker != p_list.last():\n pivot = p_list.after(marker) # next item to place\n value = pi...
[ "0.80922496", "0.7654057", "0.7528426", "0.7520646", "0.7519581", "0.7408556", "0.7397398", "0.73794985", "0.73751813", "0.7369835", "0.73261505", "0.72022533", "0.7196902", "0.71854836", "0.7178526", "0.71708345", "0.71572083", "0.7106301", "0.7105172", "0.70820385", "0.7079...
0.74628884
5
Merge two sorted lists into one.
def merge(a, b): result = [] # Append smallest values to result until either list is exhausted i = j = 0 while i < len(a) and j < len(b): if compare(a[i], b[j]) < 0: result.append(a[i]) i += 1 else: result.append(b[j]) j += 1 # Append all remaining values from the unexhausted list if i < len(a): result.extend(a[i:]) else: result.extend(b[j:]) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge ( list1, list2 ):\n new_list = []\n while len(list1)>0 and len(list2)>0:\n if list1[0] < list2[0]:\n new_list.append (list1[0])\n del list1[0]\n else:\n new_list.append (list2[0])\n del list2[0]\n return new_list + list1 + list2", "def merge(list1, list2)...
[ "0.8002001", "0.79542124", "0.78766847", "0.7858217", "0.78003126", "0.7746179", "0.77270013", "0.7690721", "0.7681128", "0.76697636", "0.7637869", "0.76258785", "0.7621432", "0.76182973", "0.76182604", "0.76102555", "0.759736", "0.759385", "0.7591011", "0.7565296", "0.755625...
0.78277063
4
Sort a list via recursive (topdown) mergesort.
def mergesort_recursive(array): # Base case for empty or singular list n = len(array) if n < 2: return array # Recur on two halves of array and merge results mid = n // 2 return merge( mergesort_recursive(array[:mid]), mergesort_recursive(array[mid:]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_sort(list):\n\n\tif len(list) <= 1:\n\t\treturn list\n\n\tleft_half, right_half = split(list)\n\tleft = merge_sort(left_half)\n\tright = merge_sort(right_half)\n\n\treturn merge(left, right)", "def merge_sort(list):\r\n \r\n if len(list) <= 1:\r\n return list\r\n \r\n left_half, righ...
[ "0.7982456", "0.7917089", "0.78690946", "0.7797491", "0.7603899", "0.7586151", "0.75551754", "0.7543085", "0.74754685", "0.74723524", "0.74529386", "0.7429456", "0.73927414", "0.7391901", "0.7391193", "0.7382016", "0.73695296", "0.73677033", "0.73447967", "0.7275034", "0.7266...
0.7005334
38
Sort a list via iterative (bottomup) mergesort.
def mergesort_iterative(array): n = len(array) result = array.copy() # Merge runs of length 1, 2, 4, 8, ... length = 1 while length < n: # Merge each pair of runs for i in range(0, n, 2 * length): mid = i + length upper = i + 2 * length result[i:upper] = merge(result[i:mid], result[mid:upper]) length *= 2 return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_sort(list):\n\n\tif len(list) <= 1:\n\t\treturn list\n\n\tleft_half, right_half = split(list)\n\tleft = merge_sort(left_half)\n\tright = merge_sort(right_half)\n\n\treturn merge(left, right)", "def merge_sort(items):\r\n # TODO: Check if list is so small it's already sorted (base case)\r\n # TODO...
[ "0.74827385", "0.7417978", "0.74099773", "0.73498195", "0.73487973", "0.7329769", "0.72622186", "0.72107303", "0.71782786", "0.71331424", "0.712062", "0.70827264", "0.70669717", "0.7044626", "0.6979438", "0.69784135", "0.69016755", "0.68788517", "0.6875574", "0.68659335", "0....
0.6378145
74
Sort a list via hybrid recursive (topdown) mergesort. Delegates to insertion sort when n is less than or equal to some threshold.
def mergesort_recursive_hybrid(array, threshold=37): # Base case delegates to insertion sort n = len(array) if n <= threshold: return insertion_sort(array) # Recur on two halves of array and merge results mid = n // 2 return merge( mergesort_recursive(array[:mid]), mergesort_recursive(array[mid:]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_sort(L):\n n = len(L)\n if n < 2:\n return L\n mid = n // 2\n left = L[:mid]\n right = L[mid:]\n merge_sort(left)\n merge_sort(right)\n merge(L, left, right)", "def merge_sort(unsorted, threshold, reverse):\r\n length = len(unsorted)\r\n if length < 2:\r\n re...
[ "0.7029845", "0.696164", "0.68413234", "0.68319064", "0.6830567", "0.6794161", "0.6786466", "0.6744685", "0.66700613", "0.66018665", "0.65889466", "0.6586275", "0.6528771", "0.65160424", "0.65155774", "0.6514788", "0.650817", "0.65026766", "0.6501337", "0.6469715", "0.6448953...
0.7173727
0
Sort a list via hybrid iterative (bottomup) mergesort. Delegates to insertion sort when n is less than or equal to some threshold.
def mergesort_iterative_hybrid(array, threshold=37): n = len(array) result = array.copy() # Initial insertion sort pass for i in range(0, n, threshold): result[i:i+threshold] = insertion_sort(result[i:i+threshold]) # Merge runs of length threshold, 2*threshold, ... length = threshold while length < n: # Merge each pair of runs for i in range(0, n, 2 * length): mid = i + length upper = i + 2 * length result[i:upper] = merge(result[i:mid], result[mid:upper]) length *= 2 return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mergesort_recursive_hybrid(array, threshold=37):\n # Base case delegates to insertion sort\n n = len(array)\n if n <= threshold:\n return insertion_sort(array)\n\n # Recur on two halves of array and merge results\n mid = n // 2\n return merge(\n mergesort_recursive(array[:mid]),...
[ "0.7101632", "0.6605162", "0.6547044", "0.6530211", "0.6487875", "0.64746773", "0.64605856", "0.64375854", "0.64229447", "0.6305018", "0.62946403", "0.6249969", "0.62481827", "0.62260824", "0.621573", "0.6202988", "0.61955655", "0.61911744", "0.61827713", "0.6164877", "0.6157...
0.70031637
1
Reorder elements in a list range such that a pivot separates them.
def partition(array, low, high, pivot): # Boundary between lower and upper partitions i = low # Compare every other element against pivot for j in range(low, high + 1): if j != pivot and compare(array[j], array[pivot]) < 0: # Swap into lower partition and increment boundary array[i], array[j] = array[j], array[i] i += 1 # Swap pivot value into final position array[i], array[pivot] = array[pivot], array[i] return i
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __Partition(ulist, start, stop):\n i = start - 1\n pivot = ulist[stop]\n for j in range(start, stop):\n if ulist[j] <= pivot:\n i += 1\n ulist[i], ulist[j] = ulist[j], ulist[i]\n ulist[i+1],ulist[stop] = ulist[stop], ulist[i+1]\n return i+1", "def test_get_pivot_in...
[ "0.6311658", "0.6205301", "0.60918045", "0.6083206", "0.6060971", "0.5999326", "0.59483886", "0.5906217", "0.58992696", "0.58958167", "0.58193344", "0.57936126", "0.57872474", "0.5752626", "0.5743744", "0.5743489", "0.57380205", "0.5719808", "0.5713487", "0.5706366", "0.56969...
0.0
-1
Sort a list inplace via quicksort.
def quicksort(array, middle=True): def qsort(array, low, high): while low < high: pivot = (low + high) // 2 if middle else high pivot = partition(array, low, high, pivot) if pivot - low <= high - pivot: qsort(array, low, pivot - 1) low = pivot + 1 else: qsort(array, pivot + 1, high) high = pivot - 1 qsort(array, 0, len(array) - 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def quick_sort(mylist):\n _inplace_quick_sort(mylist, 0, len(mylist)-1)", "def quicksort(lst):\n n = len(lst)\n qsort(lst, 0, n - 1)", "def quick_sort(l):\n return _quick_sort(l, 0, len(l) - 1)", "def quick_sort(a_list):\n return quick_sort_helper(a_list, 0, len(a_list) - 1)", "def qsort(my_...
[ "0.8566388", "0.7811469", "0.7666376", "0.7665722", "0.7628024", "0.7595974", "0.7590224", "0.75602454", "0.75491625", "0.7488644", "0.7474256", "0.74595135", "0.7425829", "0.7419462", "0.7412757", "0.7373703", "0.7277139", "0.7268242", "0.72630036", "0.7221897", "0.70804954"...
0.0
-1
Siftup the last node (end1) in the given max heap.
def sift_up(heap, start, end): # Swap last node with parents until no longer greater. i = end - 1 heaped = False while i > start and not heaped: parent = (i - 1) // 2 if compare(heap[i], heap[parent]) > 0: heap[i], heap[parent] = heap[parent], heap[i] i = parent else: heaped = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sift_down(heap, start, end):\n # Swap first node with children until no longer smaller.\n i = start\n heaped = False\n while not heaped:\n left = i * 2 + 1\n right = i * 2 + 2\n largest = i\n\n # Find largest of i, left and right\n if left < end and compare(heap[l...
[ "0.71801686", "0.7001086", "0.6813224", "0.65404683", "0.6508815", "0.6496566", "0.6474151", "0.63565713", "0.63440263", "0.6343961", "0.6335834", "0.63289756", "0.63112545", "0.6272625", "0.62513417", "0.6242469", "0.6241716", "0.61991626", "0.6170931", "0.6164732", "0.61421...
0.73906344
0
Siftdown the first node (start) in the given max heap.
def sift_down(heap, start, end): # Swap first node with children until no longer smaller. i = start heaped = False while not heaped: left = i * 2 + 1 right = i * 2 + 2 largest = i # Find largest of i, left and right if left < end and compare(heap[left], heap[largest]) > 0: largest = left if right < end and compare(heap[right], heap[largest]) > 0: largest = right # If left or right is larger than i, swap and repeat if largest == i: heaped = True else: heap[i], heap[largest] = heap[largest], heap[i] i = largest
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __siftdown(heap, nodes, pos, stopPos = None):\n # Default stopping position to end of heap\n stopPos = stopPos if not None else len(heap) - 1\n \n # Loop until past stopping position\n while pos < stopPos:\n # Set right and left child positions\n rChildP...
[ "0.69919413", "0.67116565", "0.64125484", "0.63812757", "0.6372654", "0.63402355", "0.63230765", "0.6284911", "0.62778044", "0.61587197", "0.6144824", "0.60992014", "0.60664123", "0.60273576", "0.6023935", "0.6000229", "0.59493095", "0.59148705", "0.5806669", "0.58008105", "0...
0.6062711
13
Reorder a given array into a max heap.
def heapify(array): # Start by sifting down the first parent node n = len(array) node = (n - 2) // 2 # Sift down all nodes, finishing with the root while node >= 0: sift_down(array, node, n) node -= 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heap_sort(array):\n highest_index = len(array)-1\n Heap.heapify(array, highest_index)\n for end in range(highest_index, 0, -1):\n array[end], array[0] = array[0], array[end]\n Heap.sift_down(array, 0, end-1)", "def heapify(array, highest_index):\n first = (hi...
[ "0.7620014", "0.7408683", "0.7295518", "0.7191452", "0.7178755", "0.71406955", "0.7130052", "0.70784956", "0.70300376", "0.6989501", "0.69802624", "0.6953432", "0.6950196", "0.6909327", "0.69041264", "0.68877125", "0.68816626", "0.6856917", "0.6704577", "0.66487557", "0.66159...
0.69547015
11
Sort a list inplace via heapsort.
def heapsort(array): # Turn the entire array into a heap heapify(array) # Repeatedly extract the root from the heap into a sorted sublist n = len(array) while n > 1: array[0], array[n - 1] = array[n - 1], array[0] n -= 1 sift_down(array, 0, n)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heap_sort(list):\n pass", "def heap_sort(items):\n heapq.heapify(items)\n items[:] = [heapq.heappop(items) for i in range(len(items))]", "def heap_sort(items):\n heapq.heapify(items)\n items[:] = [heapq.heappop(items) for i in range(len(items))]", "def sort(items):\n ...
[ "0.8531894", "0.8091181", "0.8091181", "0.7828576", "0.76227224", "0.75424427", "0.7507779", "0.7426823", "0.7360591", "0.7360591", "0.7310706", "0.7231151", "0.7190549", "0.718873", "0.71057963", "0.7078807", "0.7069645", "0.7058231", "0.70537895", "0.6986497", "0.6827635", ...
0.6723859
22
Shuffle a list by partition into n piles.
def pile_shuffle(array, n): result = [] for i in reversed(range(n)): result += array[i::n] return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def partition(lis: list, n: int):\n # prevent destroying the original dataset\n lis_cp = copy.deepcopy(lis)\n random.shuffle(lis_cp)\n if len(lis) > n:\n return [lis_cp[i::n] for i in range(n)]\n else:\n return [[lis_cp[i]] for i in range(len(lis))]", "def partition(self, lst, n):\n ...
[ "0.79827976", "0.7390744", "0.7346892", "0.7062788", "0.7060552", "0.6915428", "0.6911047", "0.67345446", "0.66697764", "0.6515333", "0.6464317", "0.6338885", "0.6338718", "0.6222252", "0.62007576", "0.6193716", "0.61343354", "0.61315876", "0.6106556", "0.60934514", "0.609047...
0.7378045
2
Shuffle a list by recursively pileshuffling each pile.
def recursive_pile_shuffle(array, n): # Base case for empty or singular list if len(array) < 2: return array # Pile-shuffle and recur on each of n piles piles = [array[i::n] for i in reversed(range(n))] result = [] for pile in piles: result += recursive_pile_shuffle(pile, n) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shuffle_list(self, tour_list, pop_size):\n x = np.array(tour_list)\n while len(self.pop_group) < self.shuffle_population:\n y = np.random.permutation(x)\n if not any((y == x).all() for x in self.pop_group):\n self.pop_group.append(y.tolist())", "def shuffle_...
[ "0.680971", "0.6636288", "0.6632501", "0.66021633", "0.6471498", "0.64384234", "0.64200294", "0.632114", "0.630535", "0.62996954", "0.62684", "0.61987203", "0.6188611", "0.61510146", "0.6112788", "0.6100381", "0.6089439", "0.60330445", "0.60330445", "0.60076684", "0.5993398",...
0.7225837
0
returns output of new layer
def l2_reg_create_layer(prev, n, activation, lambtha): reg = tf.contrib.layers.l2_regularizer(lambtha) init = tf.contrib.layers.variance_scaling_initializer(mode="FAN_AVG") t = tf.layers.Dense(units=n, activation=activation, kernel_initializer=init, kernel_regularizer=reg, ) return t(prev)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_output(self, name, input_layer):\n self.model.add_output(name=name, input=input_layer)\n self.output = name", "def get_output(self, **kwargs):\n with tf.variable_scope(self.layer_scope):\n return self.out", "def get_output(self, prev_layers=None, **kwargs):\n if ...
[ "0.69171226", "0.6876133", "0.6818037", "0.6814047", "0.68065774", "0.67997515", "0.6749717", "0.6734782", "0.6734782", "0.6733793", "0.67300725", "0.67230237", "0.67041695", "0.6663061", "0.6633519", "0.65794", "0.6576216", "0.6576216", "0.6570509", "0.6560505", "0.65560645"...
0.0
-1
OAuth2 compatible token login, get an access token for future requests
async def login_access_token( form_data: OAuth2PasswordRequestForm = Depends() ): user = await crud.user.authenticate( username=form_data.username, password=form_data.password ) if not user: raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail="Incorrect credentials") elif not user.is_active: raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail="Inactive user") elif not user.is_email_verified: raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail="Please verify your account via email") access_token_expires = timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES) return { "access_token": create_access_token( data={"user_id": user.id}, expires_delta=access_token_expires ), "token_type": "bearer", }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def login_access_token(form_data: OAuth2PasswordRequestForm = Depends()):\n user = auth_handler.authenticate_user(\n username=form_data.username, password=form_data.password\n )\n if user is None:\n raise HTTPException(\n detail=\"Incorrect username and/or password\", status_code=...
[ "0.7499437", "0.7365008", "0.7298682", "0.72800106", "0.72585416", "0.7221305", "0.71564126", "0.7155743", "0.71516013", "0.7135813", "0.70970845", "0.70884335", "0.705156", "0.7044395", "0.70353854", "0.7016695", "0.70149344", "0.7002961", "0.70015967", "0.69658285", "0.6939...
0.74189544
1
Create new account and send verification email with token to user.
async def register( username: str = Form(...), password: str = Form(...), email: EmailStr = Form(...), first_name: str = Form(...), last_name: str = Form(None) ): user = await crud.user.get_by_email(email=email) if user: raise HTTPException( status_code=HTTP_409_CONFLICT, detail="The user with this email already exists in the system", ) user = await crud.user.get_by_username(username=username) if user: raise HTTPException( status_code=HTTP_409_CONFLICT, detail="Username already taken", ) user = UserCreate(username=username, password=password, email=email, first_name=first_name, last_name=last_name, is_email_verified=False ) user_id = await crud.user.create(user) register_token = create_register_token(data={"email": user.email}) if send_verify_account_email( email=user.email, username=user.username, first_name=user.first_name, token=register_token ): return {"msg": "New account email sent, check your inbox to verify your account"} else: await crud.user.remove(user_id) raise HTTPException( status_code=HTTP_500_INTERNAL_SERVER_ERROR, detail="Error while trying to send email, please try again", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self, request):\n try:\n new_usr = User(username=request.data.get(\"username\"), first_name=request.data.get(\"first_name\"),\n email=request.data.get(\"email\"), password=request.data.get(\"password\"))\n serializer = RegisterUserSer(new_usr)\n ...
[ "0.7170047", "0.70211226", "0.69629407", "0.6867402", "0.6856062", "0.6824182", "0.67592835", "0.6749904", "0.67101693", "0.67071414", "0.6690668", "0.66064966", "0.66052914", "0.66018295", "0.65891904", "0.6588237", "0.6583577", "0.6559786", "0.6558199", "0.6554934", "0.6548...
0.0
-1
Verify account using token.
async def verify_account( token: str = Form(...) ): email = await verify_register_token(token) if not email: raise HTTPException(status_code=400, detail="Invalid email verify token") record = await crud.user.get_by_email(email) if not record: raise HTTPException( status_code=404, detail="The user with this email does not exist in the system." ) user = DBUser(**record) if user.is_email_verified: raise HTTPException( status_code=HTTP_409_CONFLICT, detail="User already verified", ) await crud.user.update(user.id, {'is_email_verified': True}) send_new_account_email(email=user.email, username=user.username, first_name=user.first_name) return {"msg": "Account verified"}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def verify(token: TextData, background_tasks: BackgroundTasks):\n token_data = token.data\n mail, subject, body = await AccountProcessor.confirm_email(token_data)\n background_tasks.add_task(Utility.validate_and_send_mail, email=mail, subject=subject, body=body)\n return {\"message\": \"Account V...
[ "0.73761034", "0.71671677", "0.7066079", "0.6981067", "0.6979663", "0.68349594", "0.68256533", "0.6713865", "0.67072505", "0.6704424", "0.6683784", "0.66674215", "0.6619582", "0.66139793", "0.65695435", "0.65394056", "0.6536171", "0.6521967", "0.6393953", "0.6376964", "0.6354...
0.7953528
0
Returns the number of frames in the trajectory in universe u, using teq as equilibration time and tsample as sampling time
def traj_nslice (u,teq,tsample) : # get the number of frames in the slice (http://stackoverflow.com/a/7223557) traj_slice = u.trajectory[teq::tsample] return sum(1 for _ in traj_slice)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_tracked_samples(self, u=None):\n u = self.virtual_root if u is None else u\n return self._ll_tree.get_num_tracked_samples(u)", "def frameTimes(self):\n sr = self.sampleRate\n offset = self.activeOffset\n stride = self.activeStride\n nf = self.numFrames\n t...
[ "0.5920934", "0.5774861", "0.5752458", "0.54901206", "0.5386585", "0.53777325", "0.53337634", "0.53329504", "0.52839094", "0.52773035", "0.5194862", "0.51744634", "0.5169164", "0.51685876", "0.51301676", "0.51198584", "0.5106076", "0.51057935", "0.50803345", "0.50795835", "0....
0.69339037
1
Calculate the Pearson correlation coefficient between the row sum of the given HiC matrix and the given ChIPseq profile.
def hic_chipseq_r2 (hic, chipseq) : hic_rowsum = np.sum(hic,axis=1)/float(np.sum(hic)) return np.corrcoef(hic_rowsum,chipseq)[0,1]**2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _pearsons_contingency_coefficient_compute(confmat: Tensor) ->Tensor:\n confmat = _drop_empty_rows_and_cols(confmat)\n cm_sum = confmat.sum()\n chi_squared = _compute_chi_squared(confmat, bias_correction=False)\n phi_squared = chi_squared / cm_sum\n tschuprows_t_value = torch.sqrt(phi_squared / (...
[ "0.65713036", "0.60864913", "0.6080404", "0.6041075", "0.5945653", "0.5941", "0.5863057", "0.57821524", "0.5782027", "0.5723004", "0.5652084", "0.5646357", "0.5644976", "0.56440914", "0.5639138", "0.5633804", "0.56248856", "0.5624098", "0.5592133", "0.5590622", "0.5569893", ...
0.62098205
1
Calculate the normalized probability of contact between a monomer and all others as a function of the linear distance s.
def ps (H) : p = np.array ([np.mean (np.diagonal (H, offset=k)) for k in range (H.shape[0])]) return p/np.sum(p)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))", "def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))", "def norm_dist(numbers, x):\r\n m = np.mean(np.array(numbers))\r\n s = np.std(np.array(numbers))\r\n if s < 1e-5:\r\n s = 1e-5...
[ "0.59207755", "0.59207755", "0.58610827", "0.58582896", "0.58436716", "0.5814182", "0.579739", "0.5756278", "0.57502955", "0.57094455", "0.56843543", "0.5625571", "0.56150544", "0.5607217", "0.55954623", "0.5591235", "0.5581353", "0.5579577", "0.5574086", "0.5562843", "0.5559...
0.0
-1
Calculate the relative proportion of contacts of the tracers with binding sites compared with nonbinding sites. As usual user should supply equilibration time, sampling time, and contact threshold value.
def contacts_with (sim,polymer_text,tracers_text,bindingsites_text,teq,tsample,threshold) : # select polymer, tracers, and binding sites polymer = sim.u.select_atoms (polymer_text) tracers = sim.u.select_atoms (tracers_text) bss = sim.u.select_atoms (bindingsites_text) # select binding site indices bs_n = bss.n_atoms bs_idx = bss.indices # select non-binding site indices polymer_idx = polymer.indices nbs_idx = np.setdiff1d (polymer_idx,bs_idx) nbs_n = nbs_idx.size # evaluate contacts with binding sites and non-binding sites for each # independent simulation snapshot c = [] for i,ts in enumerate(sim.u.trajectory[teq::tsample]) : d = distance_array (polymer.positions,tracers.positions, box=ts.dimensions) contacts = d<threshold cB = np.sum (contacts[bs_idx]).astype('float') cA = np.sum (contacts[nbs_idx]).astype('float') if cA != 0 : c.append ((cB/cA) / (float(bs_n)/nbs_n)) return np.mean(np.array(c))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contact_probability(summary, results, contacts, bins, feature):\r\n\r\n # prepare sampling interval size\r\n bin_width = bins[2]\r\n n_bins = int(np.ceil(bins[1]/bin_width))\r\n # bin bounds\r\n end_bins = np.arange(bin_width,bin_width*(n_bins+1), bin_width)\r\n # prepare arrays\r\n # two ...
[ "0.596737", "0.547867", "0.54080415", "0.5299857", "0.5124064", "0.5097309", "0.5068922", "0.50371313", "0.5016846", "0.4988263", "0.4936729", "0.49304453", "0.49304453", "0.48953757", "0.48854768", "0.4878589", "0.4844149", "0.48417434", "0.48266906", "0.48197976", "0.480789...
0.6338657
0
Perform a simple fit of the supplied timedependent MSD, using a linear regression of the logarithms of the values. User must supply the conversion factor from time to real time and from length to real length. Also, user
def fit_msd (msd,cutoff,delta_t,scale_l) : # prepare the values to fit: exclude the first value because it is zero t = np.arange(msd.size)*delta_t x = np.log(t[cutoff:]) y = np.log(msd[cutoff:]*scale_l**2) # perform fit to y = ax + b with their errors b,a,db,da = mbt.linear_regression (x,y,0.99) # now convert the value of b into a diffusion coefficient D = np.exp(b)/6.0 dD = np.exp(db)/6.0 return a,da,D,dD
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_model(train_ts_dis, data, init_prior = [.5,.5], bias = True, mode = \"biasmodel\"):\r\n if mode == \"biasmodel\":\r\n #Fitting Functions\r\n def bias_fitfunc(rp, tsb, df):\r\n init_prior = [.5,.5]\r\n model = BiasPredModel(train_ts_dis, init_prior, ts_bias = tsb, recu...
[ "0.6344083", "0.59664893", "0.5950108", "0.58820957", "0.57336086", "0.5699005", "0.56805956", "0.5650658", "0.5647881", "0.5618573", "0.5590892", "0.55860865", "0.5570882", "0.556389", "0.5562849", "0.55571467", "0.55424637", "0.547708", "0.5467977", "0.54346925", "0.5430097...
0.668302
0
Calculate the mean square displacement of the particles defined by 'particles_text' in simulation sim, using sampling tsample and equilibration time teq. Returns the matrix corresponding to the mean square displacement of each particle, along with a matrix corresponding to the variance in the estimate of this quantity.
def msd_t (sim,particles_text,teq,tsample) : u = sim.u particles = u.select_atoms (particles_text) nparticles = particles.n_atoms nslice = traj_nslice (u,teq,tsample) # initialize the matrix containing all the positions # of the particles at all the sampling frames particles_pos = np.zeros ((nslice,nparticles,3)) for i,ts in enumerate(u.trajectory[teq::tsample]) : particles_pos[i,:,:] = particles.positions # now initialize the Delta matrix, which contains the # squared differences between the particles' positions # at different time delays Nt = int(nslice/2) Delta = np.zeros((nparticles,Nt,Nt)) for delay in xrange(1,Nt+1) : for t0 in xrange (Nt) : t1 = t0 + delay pos1 = particles_pos[t1,:,:] pos0 = particles_pos[t0,:,:] Delta[:,delay-1,t0] = np.sum((pos1-pos0)**2,axis=1) # return the matrices of MSD and its variance return np.mean(Delta,axis=2),np.var(Delta,axis=2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def msd_t(sim,particles_text,teq,tsample) :\n u = sim.u\n particles = u.select_atoms(particles_text)\n nparticles = particles.n_atoms\n nslice = traj_nslice (u,teq,tsample)\n # initialize the matrix containing all the positions\n # of the particles at all the sampling frames\n particles_pos = ...
[ "0.7488713", "0.5663224", "0.5223949", "0.51519114", "0.5096424", "0.5066858", "0.5066858", "0.5045784", "0.5026792", "0.50021636", "0.50003314", "0.49920407", "0.49362248", "0.4934721", "0.4924958", "0.49151853", "0.4903255", "0.48586112", "0.48389977", "0.4803725", "0.47902...
0.7475914
1
Calculate the minimum distance between the atoms defined in sel1 and the atoms defined in sel2, as a function of time. Returns a matrix that contains the minimum distance for each atom defined in sel1. As usual user should supply equilibration time, sampling time, and contact threshold value.
def dmin_sel (sim,sel1_text,sel2_text,teq,tsample) : # define atom selections sel1 = sim.u.select_atoms (sel1_text) sel2 = sim.u.select_atoms (sel2_text) # get number of atoms in selection 1 natoms = sel1.n_atoms nslice = traj_nslice (sim.u,teq,tsample) dmin = np.zeros((natoms,nslice)) for i,ts in enumerate(sim.u.trajectory[teq::tsample]) : d = distance_array (sel1.positions,sel2.positions, box=ts.dimensions) dmin[:,i] = d.min(axis=1) return dmin
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def minimum_subset_distance(D, limits1, limits2):\n score = numpy.ones( (limits1[1]) )\n for i in xrange(limits1[1]):\n for j in xrange(limits2[1]-limits2[0]):\n score[i] = min(score[i], D[i,j+limits2[0]-1])\n #print i, j, D[i,j+limits2[0]-1], score[i], min(score[i], D[i,j+limits...
[ "0.5895745", "0.5762979", "0.57414365", "0.5732029", "0.54276377", "0.5345767", "0.5334685", "0.5314768", "0.5237118", "0.51300615", "0.51152414", "0.50853014", "0.5079385", "0.50745434", "0.5033808", "0.50107294", "0.5001764", "0.49995542", "0.4967053", "0.49510542", "0.4891...
0.76610744
0
Get the image index of all particles in simulation, at the frame 'frame_id'
def particle_images (sim,frame_id) : # get positions of all particles: define first the atom selection, then jump to # the user-requested trajectory frame, get the box dimensions (currently works # only for orthorhombic boxes, then calculate the image indices atoms = sim.u.select_atoms ('all') ts = sim.u.trajectory[frame_id] L = ts.dimensions[:3] pos = atoms.positions + L/2. return pos//L
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def image_id_at(self, i):\n return i", "def _get_frame_index(self, frame):\n if isinstance(frame, cf.CoordinateFrame):\n frame = frame.name\n #frame_names = [getattr(item[0], \"name\", item[0]) for item in self._pipeline]\n frame_names = [step.frame if isinstance(step.frame...
[ "0.64790046", "0.6461773", "0.63308996", "0.60192746", "0.5952561", "0.5895804", "0.58919054", "0.5886345", "0.5864442", "0.5850456", "0.58418465", "0.5816864", "0.57517457", "0.57352793", "0.5733944", "0.5714067", "0.56858724", "0.5682771", "0.5660249", "0.5656632", "0.56062...
0.82224417
0
Calculate the matrix that represents the number of times that the tracers (defined by 'tracer_text') jump from one site to another site of the polymer (defined by 'polymer_text'). The simulation 'sim' is sampled at 'tsample', excluding the first 'teq' time frames. Contact between a tracer and the polymer is defined by the distance being smaller than 'threshold'.
def jumping_matrix (sim,polymer_text,tracer_text,teq,tsample,threshold) : # define polymer and tracers u = sim.u polymer = u.select_atoms(polymer_text) tracers = u.select_atoms(tracer_text) n_polymer = polymer.n_atoms n_tracers = tracers.n_atoms # initialize jumping matrix and first distance matrix d_prev J = np.zeros ((n_polymer,n_polymer),dtype=np.int32) ts = u.trajectory [teq] d_prev = distance_array (polymer.positions,tracers.positions, box=ts.dimensions) D_prev = d_prev<threshold for ts in u.trajectory [teq::tsample] : # get distance matrix at current time step d_next = distance_array (polymer.positions,tracers.positions, box=ts.dimensions) D_next = d_next<threshold # get jumps of all tracers and add it to the jumping matrix for i in xrange (n_tracers) : t_prev = D_prev [:,i] t_next = D_next [:,i].reshape ((n_polymer,1)) t = t_prev * t_next J += t D_prev = D_next.copy() return J
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contacts_t (sim,polymer_text,tracer_text,teq,tsample,threshold) :\n u = sim.u\n polymer = u.select_atoms (polymer_text)\n tracers = u.select_atoms (tracer_text)\n ntracers = tracers.n_atoms\n npolymer = polymer.n_atoms\n nslice = mbt.traj_nslice(u,teq,tsample)\n C = np.zeros((ntracers,nsli...
[ "0.7618557", "0.74560314", "0.744143", "0.7299032", "0.5812742", "0.5133513", "0.4958529", "0.49087512", "0.48836043", "0.48615563", "0.480445", "0.4792166", "0.4757305", "0.4745062", "0.4716135", "0.46702787", "0.46492743", "0.46094853", "0.45879653", "0.45620257", "0.455755...
0.7318586
3
For the simulation 'sim', calculate the matrix of binding events of the polymer and the tracers. Returns a contacts matrix of the shape (ntracers,nslice,npolymer).
def contacts_t (sim,polymer_text,tracer_text,teq,tsample,threshold) : u = sim.u polymer = u.select_atoms (polymer_text) tracers = u.select_atoms (tracer_text) ntracers = tracers.n_atoms npolymer = polymer.n_atoms nslice = mbt.traj_nslice(u,teq,tsample) C = np.zeros((ntracers,nslice,npolymer),dtype=bool) for i,ts in enumerate(u.trajectory [teq::tsample]) : d = distance_array (tracers.positions,polymer.positions, box=ts.dimensions) c = d<threshold C[:,i,:] = c return C
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contacts_with (sim,polymer_text,tracers_text,bindingsites_text,teq,tsample,threshold) :\n # select polymer, tracers, and binding sites\n polymer = sim.u.select_atoms (polymer_text)\n tracers = sim.u.select_atoms (tracers_text)\n bss = sim.u.select_atoms (bindingsites_text)\n # select binding sit...
[ "0.64249676", "0.56675434", "0.5327914", "0.5293578", "0.5202153", "0.5148195", "0.51123756", "0.504556", "0.5030026", "0.50254846", "0.49442828", "0.48849455", "0.48715222", "0.47897685", "0.47662282", "0.472734", "0.47106746", "0.47045848", "0.46944386", "0.46944386", "0.46...
0.58174694
1
Calculate the matrix of average intrapolymer distances. User must supply the parameters teq, tsample and threshold.
def distance_matrix (sim,polymer_text,teq,tsample,threshold=2.5) : u = sim.u polymer = u.select_atoms (polymer_text) N = polymer.n_atoms nslice = mbt.traj_nslice (u,teq,tsample) d = np.zeros((N,N)) for i,ts in enumerate(u.trajectory[teq::tsample]) : this_d = distance_array(polymer.positions, polymer.positions, box=ts.dimensions) d = mbt.new_average(i,d,this_d) return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_qavg(self, TRANGE = []):\n #put some variables in this namespace\n nebins=self.nebins\n nqbins=self.nqbins\n binenergy=self.binenergy\n binq=self.binq\n visits2d=self.visits2d\n logn_Eq=self.logn_Eq\n \n if len(TRANGE) == 0:\n NTEMP = 1...
[ "0.5694141", "0.5119244", "0.5017542", "0.49339455", "0.49261585", "0.48504174", "0.4848955", "0.48481247", "0.48393378", "0.4766152", "0.47565967", "0.47392762", "0.4727343", "0.47033104", "0.46980202", "0.46610996", "0.46503145", "0.4611686", "0.46015757", "0.45886338", "0....
0.66201276
0
This function does the complete analysis of the tracers in the simulation. It calculates the virtual HiC, virtual ChIPseq, KullbackLeibler divergence between the two profiles as a function of time, and coverage of the tracers.
def tracers_analysis (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) : # define DKL(t) vector nframes = traj_nslice(sim.u,teq,tsample) DKL_t = np.zeros(nframes) # define polymer and tracers polymer = sim.u.select_atoms(polymer_text) tracers = sim.u.select_atoms(tracer_text) N = polymer.n_atoms ntracers = tracers.n_atoms # init H and C vectors H = np.zeros((N,N),dtype=np.int32) C = np.zeros((N,ntracers),dtype=np.int32) # analyze all simulation frames as decided for i,ts in enumerate(sim.u.trajectory[teq::tsample]) : # calculate Hi-C at this time frame d = distance_array(polymer.positions,polymer.positions,box=ts.dimensions) H += (d<p_threshold) Rt = H.sum(axis=1) # calculate ChIP-seq at this time frame c = distance_array(polymer.positions,tracers.positions,box=ts.dimensions) C += (c<t_threshold) Ct = C.sum(axis=1) DKL_t[i] = mbt.KL_divergence(Ct,Rt) # coverage analysis C[C>1] = 1 coverage = C.sum(axis=0).astype('float')/N return DKL_t,H,Ct.astype(np.int64),coverage
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tracers_analysis (sim,polymer_text,tracer_text,teq,tsample,t_threshold,p_threshold) :\n # define DKL(t) vector\n nframes = traj_nslice(sim.u,teq,tsample)\n DKL_t = np.zeros(nframes)\n # define polymer and tracers\n polymer = sim.u.select_atoms(polymer_text)\n tracers = sim.u.select_atoms(trac...
[ "0.63561374", "0.5775331", "0.5715415", "0.5611014", "0.5607633", "0.55742955", "0.54829526", "0.5457336", "0.54125506", "0.5397806", "0.5258343", "0.5246536", "0.52320576", "0.5230727", "0.5224489", "0.5222911", "0.52141297", "0.52132136", "0.52128506", "0.52064836", "0.5187...
0.6349215
1
Turns off the bot
async def poweroff(ctx): await ctx.send("Bye") await bot.logout()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def turn_off(self, **kwargs):\n self._send_command(\"turn_off\")", "def turn_off(self, **kwargs):\n self.robot.pause_cleaning()\n time.sleep(1)\n self.robot.send_to_base()", "def turn_off(self, **kwargs):\n set_sonoff_state(self._host, \"off\")\n self._state = False", ...
[ "0.7935323", "0.78928596", "0.7686037", "0.75895715", "0.75502986", "0.75314146", "0.7467795", "0.746141", "0.7441158", "0.7391506", "0.7319515", "0.731269", "0.7301029", "0.72807145", "0.72749", "0.72747", "0.7204974", "0.71899074", "0.71870434", "0.7155394", "0.71427", "0...
0.6489319
94
A little help always comes handy
async def _help(ctx, *, command_name: str=None): if command_name: command = bot.get_command(command_name) if not command: return await ctx.send("No such command!") return await ctx.send(f"```\n{ctx.prefix}{command.name} {command.signature}\n\n{command.help or 'Missing description'}```") description = [] for name, cog in bot.cogs.items(): entries = [" - ".join([cmd.name, cmd.short_doc or "Missing description"]) for cmd in cog.get_commands() if await _can_run(cmd, ctx) and not cmd.hidden] if entries: description.append(f"**{name}**:") description.append("• " + "\n• ".join(entries)) await ctx.send(embed=discord.Embed(description="\n".join(description), color=ctx.me.color))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def help():", "def help():\n \n pass", "def printhelp():", "def help(self):", "def help(self):", "def sth():", "def usage():", "def usage():", "def help_me():\n print(\"i'm trapped\")", "def help():\n print \"Help comes to those who ask\"", "def help(self):\n pass", ...
[ "0.7713086", "0.7395725", "0.73808426", "0.730104", "0.730104", "0.71486044", "0.7137485", "0.7137485", "0.70060796", "0.69973403", "0.68186736", "0.68186736", "0.6813253", "0.6768411", "0.6729083", "0.6727921", "0.65956175", "0.6553707", "0.65402293", "0.65402293", "0.654022...
0.0
-1
print results from end global state
def print_result(global_params, end_global_state, quiet_print=False): global_state = python_funcs.GlobalState() global_state.decode(end_global_state, global_params) if not quiet_print: print("Centroids: \n{}".format(global_state.centroids_aux)) print("Iterations: {}".format(global_state.iteration))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_results(self):\n pass", "def print_out():\n pass", "def state_print_exit(cfg, app, win):", "def end(self):\n self.my_print(\"\\t[DONE]\", msg_types.INFO)\n self.in_progress = False", "def flush_output():\n if len(buffered) == 1:\n code.add_lin...
[ "0.7087874", "0.68458045", "0.6671397", "0.65722066", "0.64805436", "0.6441369", "0.64383215", "0.64282376", "0.6374665", "0.63660663", "0.6346737", "0.63044566", "0.6283972", "0.62697494", "0.62408197", "0.61895734", "0.61799115", "0.61781204", "0.6157988", "0.61376", "0.612...
0.65808356
3
Nonblocking file lock context manager
def file_lock(filed, exclusive=False): opcode = [fcntl.LOCK_SH, fcntl.LOCK_EX][exclusive] | fcntl.LOCK_NB try: try: fcntl.flock(filed, opcode) except IOError, err: if not (err.errno == errno.EAGAIN or err.errno == errno.EACCES): raise else: yield False else: yield True except: raise finally: fcntl.flock(filed, fcntl.LOCK_UN)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lock_file_manager(file_name, mode='a+'):\n import fcntl\n with open(file_name, mode) as file_descriptor:\n try:\n fcntl.flock(file_descriptor, fcntl.LOCK_EX)\n yield file_descriptor\n finally:\n fcntl.flock(file_descriptor, fcntl.LOCK_UN)", "def _file_lock...
[ "0.72453177", "0.71964896", "0.6902446", "0.69000673", "0.6865638", "0.6817763", "0.67962337", "0.67926115", "0.67845774", "0.6774534", "0.67527604", "0.6738878", "0.6731895", "0.6731811", "0.66535723", "0.66099846", "0.66014665", "0.65571654", "0.6515045", "0.65061176", "0.6...
0.6644282
15
Initalize name and age attibutes.
def __init__(self, name, age): self.name = name self.age = age
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(...
[ "0.74732316", "0.74732316", "0.74732316", "0.74732316", "0.74732316", "0.74732316", "0.74732316", "0.74732316", "0.7472384", "0.736729", "0.7351724", "0.7351724", "0.7351724", "0.7351724", "0.72921795", "0.7189177", "0.7124933", "0.711583", "0.6997387", "0.6881059", "0.686831...
0.75609356
1
sulate a caricter shotting a Shuriken in a responce to commond.
def Shuriken(self): print(self.name.title() + " is now shotting.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def shibe(self, ctx: Message):\n\t\timage_url = requests.get(\"https://shibe.online/api/shibes?count=1\").json()[0]\n\t\tawait self.send(image_url, whisper=[ctx.author.id])", "async def shibe(self, ctx: DogbotContext):\n async with ctx.typing():\n try:\n resp = await utils....
[ "0.6028164", "0.60064155", "0.56720436", "0.5613526", "0.5598986", "0.5586523", "0.558651", "0.55609393", "0.5525008", "0.5505774", "0.55052584", "0.5490252", "0.5482178", "0.5477882", "0.54740316", "0.5473472", "0.5458496", "0.5421021", "0.53871614", "0.5370366", "0.5358281"...
0.5446821
17
simulate a caricter use Deflect in a responce to command.
def Deflect(self): print(self.name.Title() + "Deflect!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _defacto(self, ctx: commands.Context):\n responses = ['DI FACTO', 'di facto']\n await ctx.send(random.choice(responses))", "async def _conoscitore(self, ctx: commands.Context):\n responses = ['de facto, di facto', 'fanculizzati','Tra il lusco e il brusco,tra il serio e il profano,tra ...
[ "0.6985522", "0.6342284", "0.60938257", "0.6028364", "0.59290314", "0.58536536", "0.5789165", "0.5776093", "0.5768388", "0.5695349", "0.5676105", "0.5642792", "0.56084", "0.5593312", "0.559221", "0.55562896", "0.5551798", "0.55446804", "0.55304146", "0.55226284", "0.5518135",...
0.0
-1
simulate a caricter useing Dash in a responce to command.
def Dash(self): print(self.name.Title() + " Dash!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command():\n pass", "def do_command(command):\n send_command(command)\n # time.sleep(0.1) # may be required on slow machines\n response = get_response()\n print(\"Rcvd: <<< \" + response)\n return response", "def do_command(command):\n send_command(command)\n response = get_resp...
[ "0.61657566", "0.6023819", "0.5984443", "0.5877229", "0.58400345", "0.57541966", "0.5736261", "0.56955343", "0.56944484", "0.56820494", "0.5672572", "0.5652428", "0.5651466", "0.5642728", "0.5610112", "0.5601906", "0.5577275", "0.5572056", "0.5553276", "0.5535632", "0.5515353...
0.53503895
43
simulate a caricter useing Dragon Balde in a responce to command.
def Dragon_Blade(self): print(self.name.Title() + " Dragon blade!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command():\n pass", "def on_command(self, game) -> None:\n pass", "def command(facade, note):\n print facade, note", "def _sendingCommand(self): \n\n while True:\n self.tello.send_command('command') \n time.sleep(5)", "def buzz_subroutine(self):\n...
[ "0.5954689", "0.5898735", "0.5884948", "0.5832001", "0.5795689", "0.5784766", "0.5779325", "0.5699505", "0.56711584", "0.5627399", "0.5624277", "0.55758893", "0.5572339", "0.556279", "0.55591536", "0.5543098", "0.5513337", "0.5504965", "0.5499147", "0.54891926", "0.5485525", ...
0.0
-1
Get the shape of an element x. If it is an element with a shape attribute, return it. If it is a list with more than one element, compute the shape by checking the len, and the shape of internal elements. In that case, the shape must be consistent. Finally, in other case return () as shape.
def get_shape(x): if isinstance(x, list) and len(x) > 0: shapes = [get_shape(subx) for subx in x] if any([s != shapes[0] for s in shapes[1:]]): raise ValueError('Parameter dimension not consistent: {}'.format(x)) return (len(x), ) + shapes[0] else: if hasattr(x, '_shape_tuple'): return x._shape_tuple() # method to return the shape as a tuple elif hasattr(x, 'shape'): return tuple(x.shape) else: return ()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_shape(x):\n\n return None if jnp.isscalar(x) else x.shape", "def shape(self):\n for component in ('x', 'y', 'z', 'r', 't'):\n arr = getattr(self, component)\n if arr is not None:\n return arr.shape\n return ()", "def shape(self) -> Optional[tuple]:\...
[ "0.7307297", "0.71993625", "0.69666064", "0.6919222", "0.6845282", "0.68373024", "0.6807176", "0.6639613", "0.6614478", "0.65794605", "0.65702397", "0.6511562", "0.6511562", "0.64650935", "0.64642006", "0.64271873", "0.6416693", "0.6414622", "0.6394792", "0.637807", "0.637807...
0.824383
0
Return the next state of a neuron.
def alter_state(neuron, inp): # Refractory period (simplified) if neuron['state'] == 1: return 0 # Firing threshold (simplified) elif inp >= 3: return 1 # Random firing probability elif random() < random_fire_prob: return 1 else: return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next(self):\n\t\tassert(len(self.past_values) < 256**3)\n\t\twhile self.advance_state() in self.past_values:\n\t\t\tpass\n\t\tself.past_values.add(self.state)\n\t\treturn self.state", "def getNextState(self):\n return None", "def getNextState(self):\n if self.__mode == \"AlphaBeta\":\n ...
[ "0.77205414", "0.7427344", "0.74042726", "0.7253036", "0.72341335", "0.708382", "0.7024877", "0.6923807", "0.68663293", "0.6793928", "0.67842984", "0.6780127", "0.6723994", "0.670868", "0.6659324", "0.65982187", "0.6551091", "0.6523753", "0.65129405", "0.65007645", "0.6497944...
0.0
-1
Function that make the teacher/s reports
def get(self, request, report_type): # Verify if the coordinator is correctly logged in. if not request.session.get('session', False) or not request.session['type'] == 'coordinator': return render(request, self.template_login) template = '' context = {} # Depending the option given return de requested reports. if report_type == 'teacher_report': template, context = self.teacher_report(request) elif report_type == 'career_teachers_report': template, context = self.career_teachers_report(request) elif report_type == 'career_teachers_excel': return self.career_teachers_excel(request) # If there is a great request render the PDF's, otheway redirect to the reports view. if template and context: return render_to_pdf_response(request, template, context) return redirect('/evaluations/career_results/32/47740/#reportes')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_quer...
[ "0.63246506", "0.60513675", "0.604514", "0.6042579", "0.60300845", "0.5960875", "0.5941385", "0.5932449", "0.59081924", "0.5858987", "0.58584124", "0.58095837", "0.5787153", "0.57824606", "0.57559067", "0.57432735", "0.5736098", "0.56988525", "0.5692161", "0.5679412", "0.5599...
0.52245754
66
Render a PDF with the career signatures results with their teachers, this is the deliverable document for the teacher
def career_teachers_excel(self, request): # Get the career to be processed their results. career_id = request.GET.get('career_id', '') career = EvaluationsCareer.objects.get(pk__exact=career_id) # Get the results for each esignature of the carrer en each exam. data = self.get_career_results(career) # Generates the CSV with the results of the career,then return as downloadable file. response = self.get_teacher_results_excel(data) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_pdf(request):\n reg_no = request.user.username\n user = get_object_or_404(User, username=reg_no)\n user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n \n response = HttpResponse(mimetype='application/pdf')\n response['Conten...
[ "0.6895623", "0.68033445", "0.6658746", "0.6571573", "0.64920616", "0.64909965", "0.6466932", "0.6379791", "0.6377591", "0.6316708", "0.6281751", "0.62604356", "0.62534684", "0.6239564", "0.61678696", "0.6143105", "0.6135836", "0.610623", "0.6105945", "0.6096823", "0.6086271"...
0.5371696
95
Get the signatures results of the teacher in the given career for all the active exams.
def get_teacher_career_results(self, teacher, career): data = [] # Get the active exams of the career. exams = EvaluationsExam.objects.filter( type__exact=career.type, status="ACTIVE") # Get the results for each exam. for exam in exams: # Get the signatures of the teacher for the career in the exam. signatures_dtl = EvaluationsTeacherSignature.objects.filter( fk_teacher__exact=teacher.id, fk_period__exact=exam.fk_period, status="ACTIVE").select_related('fk_signature') signatures_results = [] for signature_dtl in signatures_dtl: # If it raise an exception, it means that the signature isn't evaluated yet or other error. try: # Get the results of the signature. signature_results = EvaluationsSignatureResult.objects.get( group=signature_dtl.group, fk_signature=signature_dtl.fk_signature.id, fk_exam=exam.id, status="ACTIVE" ) # Get the results for each question in the exam for the signature. questions_results = EvaluationsSignatureQuestionResult.objects.filter( group=signature_dtl.group, fk_signature=signature_dtl.fk_signature.id, fk_exam=exam.id, fk_question__optional='NO', status="ACTIVE" ).values_list('fk_question__description', 'result') # Get the comments of the signature/group. comments_result = EvaluationsSignatureQuestionResult.objects.get( group=signature_dtl.group, fk_signature=signature_dtl.fk_signature.id, fk_exam=exam.id, fk_question__optional='YES', status="ACTIVE" ).result # Split the comments and add them to a list, only the ones that are not empty. comments = list(filter(None, comments_result.split('|'))) # Crate a dictionary with the results of the signature and the questions. 
signatures_results.append({ 'teacher': teacher.name + ' ' + teacher.last_name + ' ' + teacher.last_name_2, 'signature': signature_dtl.fk_signature.description, 'group': signature_dtl.group, 'average': signature_results.average, 'comments': comments, 'total_evaluated': signature_results.total_evaluated, 'questions': questions_results }) except Exception: pass # Add the results to the exam dictionary. exam_results = { 'exam': exam.description, 'career': career.description, 'signatures_results': signatures_results, 'period': exam.fk_period.period } # Add the exam results to the list that will be returned at the end. data.append(exam_results) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def career_teachers_excel(self, request):\n\n # Get the career to be processed their results.\n career_id = request.GET.get('career_id', '')\n career = EvaluationsCareer.objects.get(pk__exact=career_id)\n\n # Get the results for each esignature of the carrer en each exam.\n data ...
[ "0.59553725", "0.5373946", "0.5306007", "0.5172902", "0.5093456", "0.4992784", "0.49038228", "0.49021885", "0.48847973", "0.48658186", "0.4847523", "0.4831599", "0.48291838", "0.4825473", "0.47478107", "0.4725367", "0.47177714", "0.4660074", "0.46355888", "0.46003297", "0.459...
0.7810591
0
Gets the count for the current tab and the count for the conversations in all 4 tabs
def get_all_conversation_type_counts(survey_id, conversation_tab, business_id, category): logger.info( "Retrieving count of threads for all conversation tabs", survey_id=survey_id, conversation_tab=conversation_tab, business_id=business_id, category=category, ) response = _get_conversation_counts(business_id, conversation_tab, survey_id, category, all_conversation_types=True) try: response.raise_for_status() except HTTPError: logger.exception("Thread count failed") raise ApiError(response) logger.info("Count successful") try: totals = response.json()["totals"] # Secure Message uses different identifiers to the tab names used in the ui, this translates the names if "new_respondent_conversations" in totals: totals["initial"] = totals.pop("new_respondent_conversations") if "my_conversations" in totals: totals["my messages"] = totals.pop("my_conversations") totals["current"] = totals[conversation_tab] return totals except KeyError: logger.exception("Response was successful but didn't contain a 'totals' key") raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_conversation_counts(business_id, conversation_tab, survey_id, category, all_conversation_types):\n params = _get_secure_message_threads_params(\n survey_id, business_id, conversation_tab, category, all_conversation_types\n )\n url = f'{current_app.config[\"SECURE_MESSAGE_URL\"]}/messages/c...
[ "0.70647997", "0.5977005", "0.58980954", "0.5830759", "0.58140135", "0.5813447", "0.57283854", "0.5707127", "0.56924605", "0.56898373", "0.5689461", "0.56750286", "0.5671644", "0.5657192", "0.56248367", "0.56144553", "0.5568868", "0.5568868", "0.5531278", "0.55113405", "0.547...
0.6258583
1
Gets the count of conversations based on the params
def _get_conversation_counts(business_id, conversation_tab, survey_id, category, all_conversation_types): params = _get_secure_message_threads_params( survey_id, business_id, conversation_tab, category, all_conversation_types ) url = f'{current_app.config["SECURE_MESSAGE_URL"]}/messages/count' response = requests.get(url, headers={"Authorization": _get_jwt()}, params=params) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNumberOfConversations(node, catalog=None):\n if catalog is None:\n catalog = getToolByName(node, 'portal_catalog')\n return len(catalog(\n object_provides=IConversation.__identifier__,\n path='/'.join(node.getPhysicalPath())))", "def count_chat_with(self, actor_label):\n ...
[ "0.6763089", "0.6620903", "0.64600253", "0.6158435", "0.61192197", "0.6113174", "0.60978407", "0.60919636", "0.6040849", "0.6025221", "0.59833664", "0.59689814", "0.5946903", "0.5919529", "0.59178835", "0.5917685", "0.59037703", "0.5818629", "0.58184177", "0.5817921", "0.5766...
0.71310955
0
creates a params dictionary
def _get_secure_message_threads_params( survey_id, business_id, conversation_tab, category, all_conversation_types=False ): params = { "is_closed": "true" if conversation_tab == "closed" else "false", "my_conversations": "true" if conversation_tab == "my messages" else "false", "new_respondent_conversations": "true" if conversation_tab == "initial" else "false", "category": category, "all_conversation_types": "true" if all_conversation_types else "false", } if business_id: params["business_id"] = business_id if survey_id: params["survey"] = survey_id return params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_params(self):\n return {}", "def __make_params(args):\n data = {}\n for i in range(len(args)):\n if i == 0: # saltando a primeira iteracao pra\n # saltar o parametro que é o nome do arquivo de execução\n continue\n if not i % 2 == 0:\n...
[ "0.7703891", "0.7555634", "0.7452079", "0.738078", "0.7275091", "0.7236291", "0.7231462", "0.71239674", "0.7104775", "0.7098227", "0.7097978", "0.7076554", "0.7066415", "0.7065275", "0.70578027", "0.70404214", "0.7035766", "0.7032196", "0.7031023", "0.70140505", "0.7013512", ...
0.0
-1
Parse the given handle of genecard data and the handle of known sequences. This will create a iterable of the entries that are in the file.
def parse(handle, known_handle) -> ty.Iterator[data.Entry]: entries = parser.parse(CONTEXT, handle, known_handle) return map(op.itemgetter(0), entries)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_fasta(self, filename):\n id = ''\n desc = ''\n tempseq = []\n try:\n seqfile = open(filename,'r')\n for line in seqfile:\n if line.startswith('>'):\n if not id is '':\n yield { 'id': id.strip(), 'de...
[ "0.67224", "0.6704314", "0.6645868", "0.654687", "0.64984864", "0.6471601", "0.6452584", "0.64150226", "0.64117914", "0.63433754", "0.62971896", "0.6088554", "0.6052537", "0.59814525", "0.5959283", "0.5950844", "0.5925366", "0.59202296", "0.5879979", "0.58754575", "0.58688813...
0.5930923
16
Open tty connection to sensor
def connect(self): if self.link is not None: self.disconnect() self.link = serial.Serial(self.port, 9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, dsrdtr=True, timeout=5, interCharTimeout=0.1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect(self):\n # open serial port\n try:\n #device = self.get_device_name(self.serial_number)\n device = \"/dev/ttyAMA0\"\n self.serial.port = device\n # Set RTS line to low logic level\n self.serial.rts = False\n self.serial.ope...
[ "0.6903115", "0.68906033", "0.6732223", "0.65403324", "0.64509165", "0.6344847", "0.6311734", "0.6310994", "0.61750466", "0.6152429", "0.6144728", "0.6142854", "0.6009745", "0.59961694", "0.59875935", "0.59851605", "0.59655786", "0.59528834", "0.59287965", "0.59237635", "0.59...
0.61139464
12
Read data from sensor
def get_status(self) -> Optional[dict]: self.link.write(self._requestSequence) response = self.link.read(9) if len(response) == 9: return { "ppa": response[2] * 0xff + response[3], "t": response[4], "checksum": self._validate_checksum(response), } return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(self):\n return read_sensor(bus=self.bus,\n address=self.address)", "def read_sensor_raw(self):\n return self.read_sensor()", "def read_sensor_raw(self):\n return self.read_sensor()", "def read_from_serial(self):\n self.running = True\n ...
[ "0.7681717", "0.75530523", "0.75530523", "0.7083415", "0.7037263", "0.6966631", "0.69487876", "0.6884216", "0.6849923", "0.6832559", "0.6738604", "0.67027956", "0.6698821", "0.6676869", "0.66277635", "0.6584875", "0.6570664", "0.6531048", "0.65308553", "0.6529524", "0.6517791...
0.0
-1
Check if message contains correct checksum
def _validate_checksum(self, msg: bytes) -> bool: return self._checksum(msg) == msg[8]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_checksum(message, previous_csum=0):\n if message.message_type in CHECKSUM_MSG_TYPES:\n csum = compute_checksum(\n message.checksum[0],\n message.args,\n previous_csum,\n )\n\n if csum == message.checksum[1]:\n return True\n else:...
[ "0.7705907", "0.7587994", "0.752931", "0.7426855", "0.7415338", "0.73797804", "0.7367894", "0.73633546", "0.72925925", "0.72435987", "0.7102735", "0.70819044", "0.702974", "0.70258206", "0.70078015", "0.70042783", "0.6903123", "0.6886536", "0.68748033", "0.68244356", "0.67954...
0.81222075
0
Trigger zero calibration (0PPM for Z14, 400 PPM for Z19)
def zero_calibrationn(self): self.link.write(self._calibrateZeroSequence)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calibration(self, cal: int, /) -> None:", "async def calibrate_zero(self):\n await self.hw_device.set_signal(self.channel)", "def _doCalibration(self):\n self._cmdCalibration(2)", "def photometric_calibration():\n pass", "def calibration(self) -> int:", "def calibration(self, pul...
[ "0.6423298", "0.631655", "0.6298982", "0.6260567", "0.6258085", "0.62078565", "0.6157141", "0.5990511", "0.59166145", "0.5912427", "0.5871232", "0.5840601", "0.5815842", "0.5784849", "0.5768754", "0.5756073", "0.5748982", "0.5732277", "0.57183784", "0.5714611", "0.5675555", ...
0.6548889
0
trigger span point calibration
def span_calibration(self, value: int): msg = bytearray(self._calibrateSpansequence) msg[3] = (value & 0xff00) >> 8 msg[4] = value & 0xff self.link.write(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calibration(self, cal: int, /) -> None:", "def _doCalibration(self):\n self._cmdCalibration(2)", "def calibration(self) -> int:", "def calibrate(self, x, y) -> None:\n pass", "def photometric_calibration():\n pass", "def calibration(self, pulse_min: int, pulse_max: int, pulse_cen...
[ "0.6384731", "0.63308597", "0.62952405", "0.6208644", "0.61908025", "0.61782265", "0.5976409", "0.5935979", "0.58907723", "0.58194613", "0.57763743", "0.57468307", "0.57454205", "0.56647533", "0.56266624", "0.5603869", "0.5593543", "0.5544322", "0.55343956", "0.55329984", "0....
0.5186941
39
The function finds the possible motifs of a certain length in a list of sequences and the number of occurrences of each motif in all sequences.
def find_motifs(seqs, k): motifs = [] motifs_count = Counter() for seq in seqs: motifs_count.update([seq[i:i + k] for i in range(len(seq) - k + 1)]) for num in sorted(motifs_count.keys()): motifs.append((num, motifs_count[num])) return sorted(motifs, key=lambda tup: tup[1], reverse=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def motif_count(sequences, start_at=4, stop_at=8):\n motifs = dict()\n for motif_length in range(start_at, stop_at):\n motifs[motif_length] = dict()\n for sequence in sequences:\n for motif in possible_motifs_by_length(motif_length):\n if motif not in motifs[motif_leng...
[ "0.7417468", "0.71107745", "0.70423144", "0.6438651", "0.63714194", "0.62245244", "0.6186527", "0.6118691", "0.6084201", "0.60029674", "0.6000182", "0.5995448", "0.5979884", "0.5935135", "0.58749336", "0.5838555", "0.5807639", "0.5802357", "0.5797088", "0.57872176", "0.578279...
0.63483924
5
The function receives the transition probability and builds the matrix.
def build_t(p, ind): with np.errstate(divide="ignore"): k = len(ind) trans = pd.DataFrame(np.log(float(0)), ind, ind) lp = np.log(p) trans.loc['B']['B'] = np.log(1 - p) trans.loc['B']['M1'] = lp for i in range(1, k): if i == k - 1: trans.loc['M' + str(k - 1)]['B'] = np.log(float(1)) else: trans.loc['M' + str(i)]['M' + str(i + 1)] = np.log(float(1)) return trans
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _transitions_matrix(self):\n trans_iter = (\n self._transitions[sj].logprob(si)\n for sj in self._states\n for si in self._states\n )\n\n transitions_logprob = np.fromiter(trans_iter, dtype=np.float64)\n N = len(self._states)\n return transiti...
[ "0.7077289", "0.7056175", "0.6960447", "0.67535603", "0.66316295", "0.65062433", "0.65017974", "0.6477982", "0.6435336", "0.64129186", "0.63912773", "0.6377895", "0.63283026", "0.62082857", "0.6103854", "0.607584", "0.6042429", "0.6036283", "0.60078543", "0.60078543", "0.5971...
0.5502166
67
The function finds the most common seeds for several lengths and calculated the emissions and transitions matrices for each seed.
def get_seeds(seqs): k_lengths = list(range(6, 20)) final_tuples = {} motifs_dic = {} # find most common seeds seeds = [find_motifs(seqs, k) for k in k_lengths] for i in k_lengths: motifs_dic[i] = seeds[i - 6][0:5] # calculate emissions and transitions global_possible_occurrences = [sum([len(seq) - k + 1 for seq in seqs]) for k in k_lengths] for key in motifs_dic.keys(): key_tuples = [] for m in motifs_dic[key]: seed = m[0] emissions, ind = build_e(seed, ALPHA) p = m[1] / global_possible_occurrences[key - 6] transitions = build_t(p, ind) key_tuples.append((seed, emissions, transitions)) final_tuples[key] = key_tuples return final_tuples
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clusterSeeds(seeds, l): \n \n # Code to complete - you are free to define other functions as you like\n \n # The function will work like this,\n # first change all the (x( y1, y2, .., yn)) tuples into (x, y) tuples.\n # then from that point make the adj matrix so that ...
[ "0.5693699", "0.5550675", "0.54997873", "0.5431754", "0.5406691", "0.5335756", "0.53210217", "0.53132635", "0.53088075", "0.53014505", "0.5280768", "0.5259172", "0.5255134", "0.5245243", "0.51910627", "0.5185651", "0.51382864", "0.5113323", "0.5112537", "0.5112202", "0.509078...
0.66207594
0
Return if x == y, if eps is not None, return if abs(xy) <= eps
def all_equal(x, y, eps=None): if eps: return all([abs(i - j) <= eps for i, j in zip(x, y) if i is not None and j is not None]) return all([i == j for i, j in zip(x, y)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def realEqual(x,y,eps=10e-10):\n return abs(x-y) < eps", "def approx_eq(x, y, tolerance = 0.000001):\n\treturn abs(x - y) < tolerance", "def approx_eq(x, y, tolerance=1e-15):\n return abs(x - y) < tolerance", "def is_almost_equal(self, x ,y ,epsilon=1*10**(-8)):\n \treturn abs(x-y) <= epsilon", "d...
[ "0.78858256", "0.7044861", "0.6952725", "0.69114923", "0.6853465", "0.6734588", "0.66924584", "0.6683078", "0.66429543", "0.6590756", "0.6590756", "0.64673287", "0.6461987", "0.6440452", "0.64339805", "0.6417307", "0.6406655", "0.6328267", "0.63227063", "0.6311484", "0.622861...
0.7315098
1
Reduce product of x.
def product(x): return functools.reduce(lambda x, y: x * y, x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prod(x):\n return functools.reduce(lambda a, b: a * b, x, 1)", "def prod(self, x, y):\n return self.reduce(x + y)", "def prod(l):\n return reduce(lambda a, b: a*b, l)", "def prod(lst):\n return reduce(mul, lst, 1)", "def _prod(seq):\n return reduce(lambda x, y: x*y, seq, 1)", "def ...
[ "0.8485899", "0.75339127", "0.71145695", "0.70959675", "0.7030964", "0.70278287", "0.70278287", "0.70278287", "0.7009414", "0.69021314", "0.6851268", "0.6762479", "0.6750255", "0.67320514", "0.6707632", "0.66387403", "0.6615138", "0.6581234", "0.6580697", "0.65709585", "0.656...
0.8354631
1
Expand multiple dimensions, i.e. add 1 after or before
def nd_expand_dims(x, n=1, before=True): if before: axes = tuple([np.newaxis] * n + [...]) else: axes = tuple([...] + [np.newaxis] * n) return x[axes]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expand(self):\n self.vertices[-1, :] = self.expanded", "def expand_many(x, axes):\n for ax in axes:\n x = torch.unsqueeze(x, ax)\n return x", "def expand_dims(self, axis, direction=1):\n res = self.empty_like()\n res.shape.insert(axis, [1])\n res.qhape.insert(axis, ...
[ "0.6542548", "0.6396454", "0.6312606", "0.6284453", "0.62287766", "0.6210935", "0.59937435", "0.5978778", "0.597588", "0.5966989", "0.5960754", "0.58849263", "0.58548903", "0.58348984", "0.5816448", "0.5815774", "0.58014554", "0.57916814", "0.56058437", "0.5589006", "0.553772...
0.55461043
20
Return a `list` of `int` that represents a range of axes.
def ranged_axes(shape): return (-np.arange(1, len(shape) + 1)[::-1]).tolist() or -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def range() -> List[int]:\n pass", "def limits(self):\n\n\t\treturn [\n\t\t\tmin(self.xvalues),\n\t\t\tmax(self.xvalues),\n\t\t\tmin(self.yvalues),\n\t\t\tmax(self.yvalues)]", "def range(self):\n lows, highs = [], []\n for graph in self._graphs.values():\n low, high = graph.range()\n ...
[ "0.7072757", "0.698906", "0.6808658", "0.6788123", "0.6770003", "0.6667891", "0.6639482", "0.6613244", "0.6563589", "0.65074825", "0.6391037", "0.6376127", "0.63659567", "0.63645595", "0.635271", "0.6344882", "0.63376725", "0.6322412", "0.6316687", "0.630669", "0.6293695", ...
0.6999914
1
Partition `zipped` into `num_steps`.
def partition(zipped, num_steps, allow_overflow=True): size = len(zipped) parts = [] for i in range(0, size, num_steps): end = i + num_steps if end >= size: parts.append(zip(*zipped[i:])) break elif allow_overflow: parts.append(zip(*zipped[i:end])) return parts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_chunks(num_items, num_steps):\n chunk_sizes = np.zeros(num_steps, dtype=int)\n chunk_sizes[:] = num_items // num_steps\n chunk_sizes[:num_items % num_steps] += 1\n\n chunk_offsets = np.roll(np.cumsum(chunk_sizes), 1)\n chunk_offsets[0] = 0\n return chunk_sizes, chunk_offsets", "def test...
[ "0.56117743", "0.5296957", "0.528687", "0.52073896", "0.5157775", "0.51236594", "0.51106584", "0.5107272", "0.5092671", "0.5070655", "0.5070408", "0.5062972", "0.5044236", "0.5033536", "0.50270873", "0.50222856", "0.5005889", "0.5004188", "0.49633753", "0.48935226", "0.487203...
0.80619586
0
Pad or truncate a list `x` with the values `pad_value` and `maxlen`.
def list_pad_or_truncate(x, maxlen, pad_value=None): length = len(x) if maxlen > length: x += [pad_value] * (maxlen - length) elif maxlen < length: x = x[:maxlen] return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pad_with_zero(list, max_length, pad_type):\n padded_list = pad_sequences(list, maxlen=max_length, padding=pad_type, truncating='post')\n return padded_list", "def pad_tokens(x, max_length, pad_token_id,\n truncate_from=\"left\",\n pad_from=\"left\"):\n assert truncate_fro...
[ "0.7232516", "0.7194974", "0.70571303", "0.7005884", "0.69063616", "0.69063616", "0.6899334", "0.68993306", "0.68805975", "0.68804926", "0.68802965", "0.6851657", "0.6702354", "0.66523", "0.6640961", "0.65956634", "0.65647626", "0.65528095", "0.6521586", "0.6515963", "0.64783...
0.8817511
0
List all available api routes
def welcome(): return ( f"Available Routes:<br/>" f"<br/>" f"/api/v1.0/precipitation<br/>" f"- List of previous year rain totals from all stations<br/>" f"<br/>" f"/api/v1.0/stations<br/>" f"- List of station numbers and names<br/>" f"<br/>" f"/api/v1.0/tobs<br/>" f"- List of previous year temperatures from all stations<br/>" f"<br/>" f"/api/v1.0/start<br/>" f"- When given the start date (YYYY-MM-DD), calculates the MIN/AVG/MAX temperature for all dates greater than and equal to start date<br/>" f"<br/>" f"/api/v1.0/start/end<br/>" f"- When given the start and end date (YYYY-MM-DD), calculates the MIN/AVG/MAX temperature for dates between start and end date inclusive<br/>" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_all_apis():\n app.logger.info('Request for api list')\n func_list = []\n for rule in app.url_map.iter_rules():\n if rule.endpoint != 'static':\n methods = ','.join(rule.methods)\n func_list.append(\n (rule.rule, methods, app.view_functions[rule.endpoint...
[ "0.73875153", "0.73784584", "0.7363998", "0.73515403", "0.72580993", "0.7240681", "0.70347446", "0.6954173", "0.6894093", "0.689102", "0.67917705", "0.67899156", "0.6766698", "0.67644465", "0.6707333", "0.66938263", "0.6681043", "0.66805625", "0.6680057", "0.665492", "0.66533...
0.0
-1
Return list of rain fall for previous year
def precipitation(): last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first() last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365) rain = session.query(Measurement.date, Measurement.prcp).\ filter(Measurement.date > last_year).\ order_by(Measurement.date).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xbrl_years(self):\n return [year for year in self.years if year >= 2021]", "def calculate_iron_hemoglobin_time_lag_effective_fraction(df, years):\n final = pd.DataFrame()\n data = df.reset_index()\n for i in list(range(0, len(years))):\n current = (data.loc[data.year == years[i]]\n ...
[ "0.59791404", "0.5774895", "0.57245696", "0.5675499", "0.55384594", "0.5491503", "0.5471699", "0.5403219", "0.54018354", "0.5384144", "0.5367263", "0.5363518", "0.5355815", "0.5304118", "0.5281944", "0.52812725", "0.5260535", "0.5254466", "0.5250503", "0.52311355", "0.5220862...
0.61494935
0
Return list of temperatures for previous year
def tobs(): last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first() last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365) temperature = session.query(Measurement.date, Measurement.tobs).\ filter(Measurement.date > last_year).\ order_by(Measurement.date).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prior_year_temp():\n\n tobs_data = session.query(Measurements.tobs).all()\n return jsonify (tobs_data)", "def temperatures():\n\n return station_9281", "def temperature():\n #Query for the dates and temperature observations from a year from the last data point.\n\n last_temp= session.query(M...
[ "0.66815954", "0.6298204", "0.6157039", "0.5975296", "0.5776933", "0.5760675", "0.57288337", "0.5723366", "0.5635067", "0.5608153", "0.56007415", "0.55899274", "0.55566156", "0.55073947", "0.5480806", "0.54751027", "0.5452222", "0.5442375", "0.54336363", "0.54262954", "0.5424...
0.6097273
3
Displays a Pandas dataframe in a table. Pass in a dataframe as first argument, and optionally how many items per page to display. ```solara import solara import pandas as pd import plotly df = plotly.data.iris() .component
def DataFrame( df, items_per_page=20, column_actions: List[ColumnAction] = [], cell_actions: List[CellAction] = [], scrollable=False, on_column_header_hover: Optional[Callable[[Optional[str]], None]] = None, column_header_info: Optional[solara.Element] = None, ): return DataTable( df, items_per_page=items_per_page, column_actions=column_actions, cell_actions=cell_actions, scrollable=scrollable, on_column_header_hover=on_column_header_hover, column_header_info=column_header_info, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_df(df):\n with pd.option_context(\"display.max_rows\", 1000, \"display.max_columns\", 100):\n display(df.head(10))", "def render(cls, df: DataFrame, *args, **kwargs):\n from labext.widgets.data_table import DataTable\n dt = DataTable(df, *args, **kwargs)\n display(dt.wi...
[ "0.7244902", "0.68102825", "0.6721509", "0.6696079", "0.66785437", "0.65800536", "0.6570673", "0.6392857", "0.63512766", "0.6313163", "0.62770796", "0.6257613", "0.61832196", "0.6140578", "0.6116468", "0.609051", "0.6064527", "0.60438454", "0.60423064", "0.59957415", "0.59046...
0.6249933
12
default range C2E3b (or C3E4b in American English) default range in midi 4863 transpose move range + 4 notes
def get_piano_notes(transpose=12): transpose = keep_in_range(transpose, 0, 28) # full range 0-127 bottom_note = max(0, 4 * transpose) # left bottom last_note = min(127, bottom_note + 15) # right upper return bottom_note, last_note
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_step_to_midi_04():\n # This should produce 131, above range.\n step = 'C'\n alter = -1\n octave = 10\n with pytest.raises(Exception):\n U.step_to_midi(step, octave, alter)\n # This should produce 128, 1 above range.\n step = 'G'\n alter = 1\n octave = 9\n with pytest.r...
[ "0.5746772", "0.5704539", "0.5549268", "0.5514676", "0.5505806", "0.5377225", "0.53318983", "0.5308374", "0.5286769", "0.51863843", "0.5114025", "0.5084776", "0.5020724", "0.5007611", "0.4982287", "0.49782467", "0.49545217", "0.49246523", "0.4916429", "0.4910963", "0.48535934...
0.0
-1
Create mode of given scale
def scale_to_mode(scale, transpose=0):
    """Build a 16-element cumulative step list for a mode of *scale*.

    The interval list is shifted by ``transpose`` to select the mode, the
    scale is repeated until at least 16 intervals are available, and the
    intervals are summed cumulatively and re-based so the first entry is 0.

    Fix: the repeat count previously used a hard-coded ``/ 12`` divisor,
    which assumes a 12-entry interval list; for a 7-note scale the result
    had only 14 elements despite the stated 16-element contract. The count
    is now derived from ``len(scale)``. An empty scale now returns ``[]``
    instead of raising IndexError.

    Args:
        scale: list of step intervals (one octave of the scale).
        transpose: index of the scale degree the mode starts on.

    Returns:
        List of up to 16 cumulative offsets, starting at 0.
    """
    if not scale:
        return []  # previously crashed with IndexError on l[0]
    steps = scale[transpose:]
    # Repeat the scale enough times to reach at least 16 intervals.
    repeats = ceil((16 - len(steps)) / len(scale))
    steps += scale * max(repeats, 0)
    cumulative = list(accumulate(steps))
    # Re-base so the mode starts at offset 0.
    origin = cumulative[0]
    return [value - origin for value in cumulative][:16]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setScale(self, mode='ACC', scale=0):\r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\treg = 0x1C\r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\treg = 0x1B\t\t\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tcurrentVal = self.read(reg)\r\n\t\tcurrentVal = self.dec2BinList(currentVal)\r\n\t\tscale = self.dec2BinList(value=scale...
[ "0.6633015", "0.6360772", "0.62977636", "0.62056977", "0.6008764", "0.60017097", "0.5995628", "0.5878364", "0.5820572", "0.58104134", "0.5709557", "0.56858575", "0.5675628", "0.56720704", "0.5600536", "0.55899876", "0.5563008", "0.5555391", "0.55215645", "0.55069333", "0.5503...
0.6723477
0
Create a cycled scale step pattern of the given length (default 16 steps).
def scale_to_16(scale, mode=0, base=0, length=16):
    """Build a cumulative step pattern of *length* notes from a cycled scale.

    The interval list is rotated right by *mode* (deque.rotate semantics),
    cycled to supply ``length - 1`` intervals, prefixed with *base*, and
    summed cumulatively.

    Args:
        scale: list of step intervals.
        mode: rotation applied to the interval list before cycling.
        base: starting offset placed at index 0.
        length: number of entries in the result (default 16).

    Returns:
        List of ``length`` cumulative offsets beginning at *base*.
    """
    intervals = deque(scale)
    intervals.rotate(mode)  # positive mode rotates right, as in the original
    steps = [base]
    steps.extend(islice(cycle(intervals), length - 1))
    return list(accumulate(steps))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def discrete_layer(width: float, steps: int) -> list:\n\n min_x = 0.001\n steps = steps/2\n\n def sum_function(stretch_factor):\n return width - min_x * ((1 - stretch_factor**steps)/(1 - stretch_factor))\n\n stretch = float(fsolve(sum_function, 1.3)[0])\n\n return sub_division(width, min_x, s...
[ "0.59693444", "0.566605", "0.5649455", "0.5492841", "0.5478045", "0.5360125", "0.53543776", "0.53307503", "0.5323359", "0.5312762", "0.5300492", "0.5283537", "0.52573586", "0.5251563", "0.52463603", "0.52405894", "0.52343446", "0.51484567", "0.5146416", "0.5121765", "0.510040...
0.4679375
78
Split the MIDI 0–127 value range into equal parts.
def pad_intervals(parts, duration=128):
    """Split a value range (default the MIDI 0-127 span) into equal parts.

    Divides *duration* into ``parts + 1`` equal segments and returns the
    interior boundary points, truncated to int.

    Args:
        parts: number of boundary points to produce (0 gives an empty list).
        duration: total span to subdivide (default 128).

    Returns:
        List of *parts* evenly spaced integer boundaries, exclusive of 0
        and *duration*.
    """
    step = duration / (parts + 1)
    return [int(step * k) for k in range(1, parts + 1)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clamp_midi(sequence):\r\n return sequence[:, MIN_NOTE:MAX_NOTE, :]", "def test_step_to_midi_04():\n # This should produce 131, above range.\n step = 'C'\n alter = -1\n octave = 10\n with pytest.raises(Exception):\n U.step_to_midi(step, octave, alter)\n # This should produce 128, 1...
[ "0.633378", "0.551024", "0.5489791", "0.5481086", "0.5435663", "0.5356096", "0.53525054", "0.5336537", "0.53029305", "0.52928007", "0.52525324", "0.52401626", "0.5239775", "0.5234048", "0.5220844", "0.5213896", "0.51938474", "0.51900053", "0.51806563", "0.51787907", "0.516458...
0.49355653
50
This function is from the latest version of SCons, backported to support older SCons versions. Configure check for a specific program: check whether program prog_name exists in the path. If it is found, return its path; otherwise return None.
def CheckProg(context, prog_name):
    """SCons configure check: look up *prog_name* on the construction PATH.

    Backport of the CheckProg check from newer SCons releases so it is
    available under older SCons versions. Prints a progress message,
    records the boolean result in the configure log, and returns the
    resolved path (or None when the program is not found).

    Args:
        context: the SCons configure context.
        prog_name: name of the executable to search for.

    Returns:
        The program's path as a string, or None if not found.
    """
    context.Message("Checking whether %s program exists..." % prog_name)
    # WhereIs searches the env's PATH; returns None when nothing matches.
    path = context.env.WhereIs(prog_name)
    context.Result(bool(path))
    return path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_program(binary_name):\n pth = os.path.abspath(__file__)\n\n # Split off the name and the directory...\n pth, notused = os.path.split(pth)\n pth, notused = os.path.split(pth)\n pth = os.path.join(pth, \"programs\", binary_name)\n pth = os.path.normpath(pth)\n\n log.debug(\"Checking for...
[ "0.67661804", "0.66921204", "0.65894896", "0.6528314", "0.64908946", "0.6469822", "0.641857", "0.63612264", "0.6318409", "0.6250026", "0.61892205", "0.61833847", "0.61833847", "0.61687654", "0.61340445", "0.61195254", "0.61188084", "0.6089571", "0.6089144", "0.6088777", "0.60...
0.7263758
0
This function is from SCons but extended with additional flags, e.g. extra_libs. Another (more sophisticated) test for a library. Checks whether the library and header are available for the language (may be 'C' or 'CXX'). *call* may be a valid expression _with_ a trailing ';'. As in CheckLib, we support library=None, to test if the call compiles without extra link flags.
def CheckLibWithHeader(context, libs, header, language,
                       call = None, extra_libs = None, autoadd = 1):
    """Extended SCons library-plus-header configure check.

    Like the stock CheckLibWithHeader, but forwards *extra_libs* so the
    test link can pull in additional libraries. Compiles a test program
    that includes *header* and (optionally) evaluates *call*, linking
    against each candidate in *libs* until one succeeds.

    Args:
        context: the SCons configure context.
        libs: a library name, a list of names, or [] / None to test
            compilation with no extra link flags.
        header: header (or list of headers) to include in the test program.
        language: 'C' or 'CXX'.
        call: optional expression to evaluate, with a trailing ';'.
        extra_libs: additional libraries appended to the test link line.
        autoadd: when true, add the successful library to the environment.

    Returns:
        True when the check succeeded (CheckLib returns a falsy error code).
    """
    # Build the '#include ...' prologue for the test program.
    prog_prefix, dummy = \
        SCons.SConf.createIncludesFromHeaders(header, 0)
    # Normalize libs: [] means "no library", a scalar becomes a one-item list.
    if libs == []:
        libs = [None]
    if not SCons.Util.is_List(libs):
        libs = [libs]
    res = SCons.Conftest.CheckLib(context, libs, None, prog_prefix,
                                  call = call, language = language,
                                  extra_libs = extra_libs, autoadd = autoadd)
    context.did_show_result = 1
    # CheckLib returns an error indicator; invert it to a success boolean.
    return not res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_library(self, **kw):\n\tself.check(\n\t\tcompile_filename = [],\n\t\tfeatures = 'link_lib_test',\n\t\tmsg = 'Checking for libraries',\n\t\t)", "def check_libraries(env):\n # Detect OS X python installation, and attempt to correct for it.\n if os.uname()[0] == 'Darwin':\n env.Replace(SHLINK...
[ "0.70988935", "0.6297976", "0.60500425", "0.56923157", "0.55388916", "0.5488028", "0.53949934", "0.5367634", "0.532435", "0.5322072", "0.5235258", "0.5161452", "0.5156778", "0.51427037", "0.5137287", "0.5064834", "0.501241", "0.4967916", "0.49471545", "0.49432126", "0.4930325...
0.71553355
0
Generate a normal distribution of sleep patterns based around 7 hours of sleep a day
def _get_random_sleep_series(index, mean=60 * 7, std_dev=60): data_points_to_create = len(index) sleep_times = np.random.normal(loc=mean, scale=std_dev, size=data_points_to_create) sleep_series = pd.Series(sleep_times, index=index) return sleep_series
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_rand_7():\n\n while(True):\n # This generates a random number uniformly distributed between 1 and 24.\n # The first term is 5 times a rand num between 1 - 4, yielding {5, 10,\n # 15, 20}. The second is a rand num between 1 - 4.\n # Since the two numbers are *independent*...
[ "0.62330836", "0.59178996", "0.58577496", "0.57935345", "0.5758614", "0.5758422", "0.5744238", "0.5741066", "0.5537405", "0.5528022", "0.5510911", "0.549863", "0.548807", "0.5447449", "0.544088", "0.54101473", "0.53661644", "0.5355117", "0.53540385", "0.53415316", "0.53243715...
0.5421668
15
Used for fetching the attributes for __init__.
def _connect(self):
    """Fetch the PMS root resource; used to populate __init__ attributes.

    Returns:
        The parsed root element returned by ``self.query('/')``.

    Raises:
        NotFound: when no server answers at ``self.baseurl``. The original
            error is chained (``from err``) so the root cause stays visible.
    """
    try:
        return self.query('/')
    except Exception as err:
        log.error('%s: %s', self.baseurl, err)
        raise NotFound('No server found at: %s' % self.baseurl) from err
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_attrs(self):\n raise NotImplementedError", "def __attrs_post_init__(self):", "def attributes(self):\n ...", "def attributes(self):", "def extra_init(self):\n pass", "def __init__():", "def _init_attributes(self):\n self.attr = {\n 'name': None,\n ...
[ "0.7480157", "0.72398555", "0.7138921", "0.71215737", "0.66924196", "0.66607845", "0.6618965", "0.65260583", "0.65222377", "0.6492983", "0.6481646", "0.63778156", "0.6333728", "0.6315431", "0.6311145", "0.6308918", "0.6308918", "0.6308918", "0.6308918", "0.6308918", "0.630891...
0.0
-1
Library to browse or search your media.
def library(self):
    """Return the Library wrapper for browsing/searching media, cached lazily.

    The first access queries '/library/' and memoizes the result in
    ``self._library``; later accesses return the cached object.
    """
    if self._library:
        return self._library
    self._library = Library(self, self.query('/library/'))
    return self._library
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def async_browse_media(self, media_content_type=None, media_content_id=None):\n return await media_source.async_browse_media(\n self.hass,\n media_content_id,\n content_filter=lambda item: item.media_content_type.startswith(\"audio/\"),\n )\n \n #T...
[ "0.668259", "0.61301744", "0.60395354", "0.59826845", "0.5969798", "0.59561574", "0.57918155", "0.5790495", "0.57535", "0.57049066", "0.56371135", "0.55711895", "0.55135405", "0.54885805", "0.54820323", "0.5427746", "0.54237807", "0.5411913", "0.54091287", "0.5393908", "0.538...
0.0
-1
Query PMS for all clients connected to PMS.
def clients(self):
    """Query PMS for every connected client.

    Returns:
        List of PlexClient objects, one per entry under '/clients',
        each addressed via the advertised address:port.
    """
    return [
        PlexClient('http://%s:%s' % (elem.attrib['address'], elem.attrib['port']),
                   server=self, data=elem)
        for elem in self.query('/clients')
    ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serviceQueries(self):\n #log.debug(f\"{self.name}: servicing queries for {len(self.connections)} connections.\")\n for ca, requester in self.connections.items():\n if requester.ims:\n log.info(\"Server %s received:\\n%s\\n\", self.name, requester.ims)\n re...
[ "0.6438978", "0.615843", "0.61226875", "0.59365004", "0.5798395", "0.5786192", "0.5703405", "0.56942713", "0.5646254", "0.5593451", "0.5584262", "0.556786", "0.55544364", "0.55305445", "0.5511454", "0.5496943", "0.54856825", "0.5469359", "0.54657024", "0.5449885", "0.5416547"...
0.58461624
4
Queries the PMS /clients list for the single client with the given name.
def client(self, name):
    """Return the connected PlexClient matching *name* (case-insensitive).

    Args:
        name: client name to look up.

    Returns:
        PlexClient for the first '/clients' entry whose name matches.

    Raises:
        NotFound: if no connected client advertises that name.
    """
    wanted = name.lower()
    for elem in self.query('/clients'):
        # Default to '' so entries without a name attribute are skipped
        # instead of raising AttributeError on None.lower().
        if elem.attrib.get('name', '').lower() == wanted:
            baseurl = 'http://%s:%s' % (
                elem.attrib['address'], elem.attrib['port'])
            return PlexClient(baseurl, server=self, data=elem)
    raise NotFound('Unknown client name: %s' % name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serviceQueries(self):\n #log.debug(f\"{self.name}: servicing queries for {len(self.connections)} connections.\")\n for ca, requester in self.connections.items():\n if requester.ims:\n log.info(\"Server %s received:\\n%s\\n\", self.name, requester.ims)\n re...
[ "0.65767473", "0.6232553", "0.60960466", "0.5942476", "0.5886892", "0.58853275", "0.57991374", "0.578795", "0.5773331", "0.5660284", "0.565363", "0.56275344", "0.55649745", "0.556454", "0.5544258", "0.5528457", "0.55071217", "0.5499738", "0.549224", "0.5471458", "0.54629916",...
0.0
-1
Headers given to PMS.
def headers(self):
    """Return the HTTP headers to send to PMS.

    Fix: the original aliased the module-level BASE_HEADERS dict and then
    mutated it in place, permanently injecting 'X-Plex-Token' into the
    shared dict for every subsequent caller (even after self.token
    changed). We now copy it before adding the token.

    Returns:
        dict of headers, including 'X-Plex-Token' when a token is set.
    """
    headers = dict(BASE_HEADERS)
    if self.token:
        headers['X-Plex-Token'] = self.token
    return headers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def headers(self) -> dict:\n raise NotImplementedError # pragma: no cover", "def get_headers(self):\r\n raise NotImplementedError", "def getAllHeaders():", "def headers(self):\n\n return None", "def headers(self) -> Mapping[str, str]:\n return pulumi.get(self, \"headers\")", ...
[ "0.7567973", "0.7393012", "0.7389167", "0.72775483", "0.7240663", "0.723666", "0.720173", "0.720087", "0.71737015", "0.71737015", "0.7157145", "0.7148502", "0.7143607", "0.7109385", "0.7090964", "0.7078412", "0.70776564", "0.70776564", "0.70469546", "0.70202124", "0.70194054"...
0.73056155
3
Returns a playlist with a given name or raise NotFound.
def playlist(self, title):  # noqa
    """Return the playlist whose title matches *title* exactly.

    Args:
        title: exact playlist title to look up.

    Raises:
        NotFound: if no playlist carries that title.
    """
    found = next((pl for pl in self.playlists() if pl.title == title), None)
    if found is None:
        raise NotFound('Invalid playlist title: %s' % title)
    return found
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPlaylist(self,name):\n playlist = self.getAllPlaylists(name)\n return playlist[0] if playlist else None", "def find_playlist(playlist_name):\n\n playlists = spotifyObject.user_playlists(config.USERNAME)\n\n for playlist in playlists['items']:\n if playlist['name'] == playlist_na...
[ "0.7941071", "0.7861843", "0.78293544", "0.7535346", "0.69100803", "0.6823972", "0.6806134", "0.6756279", "0.67378354", "0.6722012", "0.6667086", "0.66420937", "0.65845215", "0.65714145", "0.6559312", "0.65355885", "0.65215516", "0.6419069", "0.63462394", "0.63388675", "0.629...
0.79534113
0
Main method used to handle HTTP requests to PMS. Encodes the response to UTF-8 and parses the XML returned from PMS into an Element.
def query(self, path, method=None, headers=None, **kwargs):
    """Perform an HTTP request against PMS and parse the XML response.

    Args:
        path: path relative to the server base URL.
        method: a requests session method (get/post/...); defaults to GET.
        headers: optional extra headers merged over the standard ones.
        **kwargs: forwarded to the requests call.

    Returns:
        The parsed XML root Element, or None when the body is empty.

    Raises:
        BadRequest: when the response status is not 200 or 201.
    """
    url = self.url(path)
    method = method or self.session.get
    log.info('%s %s', method.__name__.upper(), url)
    # Copy so per-call headers never mutate the instance's base headers.
    h = self.headers().copy()
    if headers:
        h.update(headers)
    response = method(url, headers=h, timeout=TIMEOUT, **kwargs)
    if response.status_code not in [200, 201]:
        # NOTE(review): if codes.get(...) returns None for an unknown
        # status this raises TypeError instead of BadRequest — presumably
        # all PMS statuses are in the codes table; confirm.
        codename = codes.get(response.status_code)[0]
        raise BadRequest('(%s) %s' % (response.status_code, codename))
    data = response.text.encode('utf8')
    return ElementTree.fromstring(data) if data else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_response(self, resp):\n p, u = self.getparser()\n\n if hasattr(resp, 'text'):\n # modern requests will do this for us\n text = resp.text # this is unicode(py2)/str(py3)\n else:\n\n encoding = requests.utils.get_encoding_from_headers(resp.headers)\n ...
[ "0.592283", "0.5827948", "0.579195", "0.5679414", "0.5676833", "0.5584127", "0.55087835", "0.54462636", "0.54320216", "0.5430315", "0.53699476", "0.53515357", "0.5332319", "0.53168535", "0.52934587", "0.52518004", "0.5248539", "0.52266735", "0.52137244", "0.52040625", "0.5204...
0.0
-1
Searching within a library section is much more powerful.
def search(self, query, mediatype=None):
    """Search the whole server via '/search'.

    Args:
        query: search text (URL-quoted before the request).
        mediatype: optional media type; when truthy, results are filtered
            to items whose ``type`` matches it.

    Returns:
        List of matching items.
    """
    results = utils.listItems(self, '/search?query=%s' % quote(query))
    if not mediatype:
        return results
    return [item for item in results if item.type == mediatype]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def book_search(library: list) -> None:\n options = ['Author', 'Title', 'Publisher', 'Shelf', 'Category', 'Subject']\n prompt = '\\nWhat option would you like to search by?'\n choice = get_user_choice(options, prompt)\n if choice == '1':\n search_by_chosen_option(library, options[0])\n elif c...
[ "0.66239214", "0.5919388", "0.5910919", "0.58785844", "0.58637214", "0.58380646", "0.58259964", "0.58247703", "0.57671076", "0.5762657", "0.5658307", "0.5657428", "0.56097376", "0.5589635", "0.55391014", "0.55387616", "0.5529388", "0.55221266", "0.5514941", "0.5492186", "0.54...
0.0
-1
List all active sessions.
def sessions(self):
    """List all active playback sessions reported at '/status/sessions'."""
    return utils.listItems(self, '/status/sessions')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sessions(self):\n\n return self.all_sessions", "def get_sessions_list():\n sessions = Session.query.all()\n result = sessions_schema.dump(sessions).data\n return jsonify({'status': 'success', 'message': None, 'data': result}), 200", "def fusion_api_get_active_sessions(self):\n ...
[ "0.75758445", "0.757478", "0.7396808", "0.7384779", "0.73801714", "0.72702295", "0.71300334", "0.7045607", "0.7029493", "0.6845006", "0.679199", "0.6788236", "0.6770604", "0.6673959", "0.6615374", "0.65827996", "0.6541172", "0.65161306", "0.6476787", "0.6473066", "0.647231", ...
0.76642567
0