query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Run the task compose full series + add to our results
def run(self): while self.i < len(self.series): # Grab line + RSS s = self.series[self.i] rss = self.request_rss(s.feedUrl) # Compose Episodes ep_dicts = [] for entry in rss['entries']: ep_dicts.append(Episode(s, entry).__dict__) # Build result dict result_dict = dict() result_dict['series'] = deepcopy(s.__dict__) result_dict['series']['genres'] = \ result_dict['series']['genres'].split(';') result_dict['series']['type'] = 'series' result_dict['episodes'] = ep_dicts # Store podcast self.storer.store(result_dict) # Move onto the next one self.i += 20 print("Retrieved " + str(s.id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute_series(self):\n for n in xrange(self.conf[\"n_runs\"]):\n self.runs[n].execute()", "def run(self):\n if self.task == 'all':\n self.produce_all_term_data()\n else:\n self.produce_next_term_data()", "def run(self):\n import sacc\n im...
[ "0.63525146", "0.5947928", "0.5696838", "0.5634496", "0.5608928", "0.56013626", "0.5599923", "0.5587855", "0.55873567", "0.55794024", "0.5530784", "0.551449", "0.55136603", "0.54978615", "0.5480734", "0.5459616", "0.5439924", "0.5439924", "0.54238594", "0.54176867", "0.540211...
0.54119116
20
Test the configuration is not overrided but extend for `suppress`
def test_rich_traceback_configuration_extend_suppress(mocker, default_logging_config): import click rich_traceback_install = mocker.patch("rich.traceback.install") rich_pretty_install = mocker.patch("rich.pretty.install") sys_executable_path = str(Path(sys.executable).parent) traceback_install_defaults = {"suppress": [click, sys_executable_path]} fake_path = "dummy" rich_handler = { "class": "kedro.logging.RichHandler", "rich_tracebacks": True, "tracebacks_suppress": [fake_path], } test_logging_config = default_logging_config test_logging_config["handlers"]["rich"] = rich_handler LOGGING.configure(test_logging_config) expected_install_defaults = traceback_install_defaults expected_install_defaults["suppress"].extend([fake_path]) rich_traceback_install.assert_called_with(**expected_install_defaults) rich_pretty_install.assert_called_once()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_suppress_import(self):\n logging.info(\"testing suppress import\")\n\n generated_file = os.path.join(self._test_workspace,\n \"generated.suppress\")\n\n extract_cmd = ['CodeChecker', 'parse',\n os.path.join(self._test_workspac...
[ "0.6450392", "0.6399377", "0.63585526", "0.62900907", "0.6012756", "0.59775937", "0.5947511", "0.59247243", "0.5893758", "0.5858456", "0.5848734", "0.5842272", "0.58085376", "0.5727631", "0.56761247", "0.56608504", "0.56291807", "0.5626758", "0.56165975", "0.56113476", "0.560...
0.6306293
3
Prints solution on console.
def print_solution(data, manager, routing, solution): time_dimension = routing.GetDimensionOrDie('Time') total_time = 0 for vehicle_id in range(data['num_vehicles']): index = routing.Start(vehicle_id) plan_output = 'Route for vehicle {}:\n'.format(vehicle_id) while not routing.IsEnd(index): time_var = time_dimension.CumulVar(index) plan_output += '{0} Time({1},{2}) -> '.format( manager.IndexToNode(index), solution.Min(time_var), solution.Max(time_var)) index = solution.Value(routing.NextVar(index)) time_var = time_dimension.CumulVar(index) plan_output += '{0} Time({1},{2})\n'.format(manager.IndexToNode(index), solution.Min(time_var), solution.Max(time_var)) plan_output += 'Time of the route: {}min\n'.format( solution.Min(time_var)) print(plan_output) total_time += solution.Min(time_var) print('Total time of all routes: {}min'.format(total_time))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_solution():\n pass", "def solve(self):\n print(\"Problem %s Answer: %s\" % (self.number, self.solution()))", "def printSolution(self):\n print \"----- Solution -----\"\n for feature in self.features:\n print \"Name = \" + feature.name + \" Value = \" + str(feature.v...
[ "0.87028384", "0.74389744", "0.7377259", "0.708527", "0.7000077", "0.69975126", "0.69795585", "0.6975123", "0.6821493", "0.6821493", "0.6821493", "0.6735901", "0.6602481", "0.65324736", "0.65144485", "0.64514583", "0.64233005", "0.64180934", "0.63984954", "0.6382045", "0.6381...
0.5708058
89
Returns the travel time between the two nodes.
def time_callback(from_index, to_index): # Convert from routing variable Index to time matrix NodeIndex. from_node = manager.IndexToNode(from_index) to_node = manager.IndexToNode(to_index) return self.data['time_matrix'][from_node][to_node]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nodes_time(self):\n return self._nodes_time", "def _node_distance(self, first, second):\r\n\r\n name_1 = first.name.split(' ')[0]\r\n name_2 = second.name.split(' ')[0]\r\n\r\n seq1 = self.msa_by_name[name_1]\r\n seq2 = self.msa_by_name[name_2]\r\n\r\n distance = sel...
[ "0.69388837", "0.6491376", "0.64647245", "0.644064", "0.64244837", "0.64106727", "0.64093053", "0.6311803", "0.6295542", "0.61754763", "0.6144564", "0.613612", "0.61312324", "0.5971906", "0.59415174", "0.5910606", "0.5904154", "0.5904154", "0.59017485", "0.58911026", "0.58676...
0.0
-1
Variable assignment can include assigning array elements.
def assign_variable(executor, variable, value): variable = variable.replace(" ", "") # TODO Should move parsing of this to ParsedStatementLet. # TODO Need to handle N-dimensional array element assignment. i = variable.find("(") if i != -1: # Array reference j = variable.find(")", i+1) if j == -1: raise BasicSyntaxError(F"Missing ) in in array assignment to {variable}") if i+1 == j: raise BasicSyntaxError(F"Missing array subscript in assignment to {variable}") subscripts = variable[i+1:j].split(",") variable = variable[:i] is_valid_identifier(variable) subscripts = [int(eval_expression(executor._symbols, subscript)) - 1 for subscript in subscripts] executor.put_symbol_element(variable, value, subscripts) else: is_valid_identifier(variable) executor.put_symbol(variable, value, symbol_type=SymbolType.VARIABLE, arg=None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_50_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 2]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texp...
[ "0.6699032", "0.6667143", "0.66010433", "0.64327574", "0.6394654", "0.632295", "0.62098926", "0.61762494", "0.6125874", "0.6119721", "0.60467184", "0.6016822", "0.5979976", "0.5958759", "0.58768624", "0.57980937", "0.5793912", "0.5786944", "0.57581115", "0.5734274", "0.573222...
0.7048861
0
Declares an array. Initializes it to zeros. TODO Handle more than two dimensions.
def stmt_dim(executor, stmt:ParsedStatementDim): for name, value in stmt._dimensions: initializer = init_array(value) executor.put_symbol(name, initializer, SymbolType.ARRAY, arg=None) # Not right, but for now.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zeros_numpy_array(self,\n type_name,\n as_matrix = True):\n if as_matrix:\n return np.zeros((1, self.get_max_id(type_name)), dtype='float32')\n else:\n return np.zeros((self.get_max_id(type_name),), dtype='float32')", "def zero_init(self, shape):\...
[ "0.66444355", "0.66228235", "0.65934485", "0.65253294", "0.63993794", "0.63414425", "0.6330485", "0.63030803", "0.6281445", "0.62749404", "0.6273253", "0.62706274", "0.62627155", "0.62333333", "0.6128069", "0.6122897", "0.60999733", "0.60854125", "0.6048962", "0.6042979", "0....
0.0
-1
An if statement works by skipping to the next line, if the THEN clause is false, otherwise it continues to execute the clauses after the THEN.
def stmt_if(executor, stmt): e = Expression() result = e.eval(stmt._tokens, symbols=executor._symbols) if not result: executor.goto_next_line()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def end_ifeq(self):\n self.indent_left()\n self.write_line(\"endif\")", "def test_28_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return; else return 0; end\n\t\tend\...
[ "0.6381344", "0.6276441", "0.6248517", "0.6244961", "0.62422997", "0.61599344", "0.6136429", "0.6136429", "0.5794875", "0.5792966", "0.575992", "0.57597315", "0.5758323", "0.5756217", "0.573461", "0.56686", "0.5654149", "0.5635589", "0.5634545", "0.563404", "0.5616345", "0....
0.741904
0
Define a userdefined function. 470 DEF FND(D)=SQR((K(I,1)S1)^2+(K(I,2)S2)^2)
def stmt_def(executor, stmt:ParsedStatementDef): executor.put_symbol(stmt._variable, stmt._tokens, SymbolType.FUNCTION, stmt._function_arg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dmy_fn(x):\n return 0.4*(2.0*(np.exp(x*4) + np.exp(-x*4)) - 8 + 0.6*x - 6*x**2)", "def calculate_fn_value(self) :\r\n\r\n self.fn_value = self.gn_value + self.hn_value #f(n) = g(n) + h(n)\r", "def d2f2dx1x5_func(self,X):\n return(\n -(self.rj**2)*self.rm*self.k_spr*(self.b_spr**2)...
[ "0.6282961", "0.6064702", "0.59194523", "0.5878748", "0.58671814", "0.5861088", "0.5792502", "0.57803655", "0.5707842", "0.5690402", "0.56867594", "0.5677876", "0.566242", "0.5646247", "0.5644403", "0.56438243", "0.56058556", "0.5597457", "0.5594543", "0.5573553", "0.5563041"...
0.0
-1
The WIDTH statement is only for compatibility with some versions of BASIC. It set the width of the screen. Ignored.
def stmt_width(executor, stmt): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def width(self):\n return(self.SCREEN_W)", "def width(self):\n\t\tpass", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):",...
[ "0.7544066", "0.7302459", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.69712067", "0.69448876", "0.6928398", "0.6928398", "0.6913508", "0.6904323", "0.68787444", "0...
0.0
-1
mode must be "claims" or "channels"
def make_graph(mode, show=True): if mode != "claims" and mode != "channels": return plt.close("all") # Open the DB db_file = "/home/brewer/local/lbry-sdk/lbry/lbryum-data/claims.db" conn = sqlite3.connect(db_file) c = conn.cursor() # List for results times = [] # Query if mode == "claims": x = "<>" else: x = "=" query = "SELECT creation_timestamp FROM claim\ WHERE claim_type {x} 2;".format(x=x) # Iterate over query results i = 0 for t in c.execute(query): times.append(t) i = i + 1 # We can also close the connection if we are done with it. # Just be sure any changes have been committed or they will be lost. conn.close() # Sort the times and convert to a numpy array times = np.sort(np.array(times).flatten()) # Save some stats to JSON for Electron now = time.time() my_dict = {} my_dict["unix_time"] = now my_dict["human_time_utc"] = str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC" my_dict["total_{mode}".format(mode=mode)] = int(\ len(times)) my_dict["new_{mode}_1_hour".format(mode=mode)] = int(\ np.sum(times > (now - 3600.0))) my_dict["new_{mode}_24_hours".format(mode=mode)] = int(\ np.sum(times > (now - 86400.0))) my_dict["new_{mode}_7_days".format(mode=mode)] = int(\ np.sum(times > (now - 7*86400.0))) my_dict["new_{mode}_30_days".format(mode=mode)] = int(\ np.sum(times > (now - 30*86400.0))) f = open("{mode}_stats.json".format(mode=mode), "w") f.write(json.dumps(my_dict)) f.close() # Count new claims this UTC day count_today = np.sum(times > 86400.0*int(now/86400.0)) if mode == "claims": string = "publications" else: string = "channels" print("{K} {mode}, {n} from today so far (UTC). 
".format(K=len(times), mode=string, n=count_today), end="", flush=True) # Plotting stuff plt.rcParams["font.family"] = "Liberation Sans" plt.rcParams["font.size"] = 14 plt.style.use("dark_background") plt.rcParams["axes.facecolor"] = "#3c3d3c" plt.rcParams["savefig.facecolor"] = "#3c3d3c" plt.figure(figsize=(15, 11)) plt.subplot(2, 1, 1) times_in_days = (times - 1483228800)/86400.0 days = times_in_days.astype("int64") plt.plot(times_in_days, np.arange(len(times)), "w-", linewidth=1.5) plt.ylabel("Cumulative number of {mode}".format(mode=string)) plt.title("Total number of {mode} = {n}.".format(n=len(times), mode=string)) plt.xlim([0.0, days.max() + 1]) plt.ylim(bottom=-100) plt.gca().tick_params(labelright=True) # Add vertical lines for new years (approximately) new_years = np.arange(0, 5)*365.2425 for year in new_years: plt.axvline(year, color="r", alpha=0.8, linestyle="--") # Add text about years year_names = [2017, 2018, 2019] for i in range(len(year_names)): year = new_years[i] plt.text(year+5.0, 0.95*plt.gca().get_ylim()[1], "{text} begins".format(text=year_names[i]), fontsize=10) # Add line and text about MH's video plt.axvline(890.0, linestyle="dotted", linewidth=2, color="g") plt.text(890.0, 0.2*plt.gca().get_ylim()[1], "@MH video\n\'Why I Left YouTube\'\ngoes viral", fontsize=10) plt.subplot(2, 1, 2) bin_width = 1.0 # Bin edges including right edge of last bin bins = np.arange(0, np.max(days)+2) - 0.5*bin_width color = "#6b95ef" counts = plt.hist(days, bins, alpha=0.9, color=color, label="Raw", width=bin_width, align="mid")[0] # Compute 10-day moving average moving_average = np.zeros(len(bins)-1) for i in range(len(moving_average)): subset = counts[0:(i+1)] if len(subset) >= 10: subset = subset[-10:] moving_average[i] = np.mean(subset) plt.plot(bins[0:-2] + 0.5*bin_width, moving_average[0:-1], "w-", label="10-day moving average", linewidth=1.5) plt.xlim([0.0, days.max() + 1]) plt.xlabel("Time (days since 2017-01-01)") plt.ylabel("New {mode} added each 
day".format(mode=string)) subset = counts[-31:-1] plt.title("Recent average rate (last 30 days) = {n} {mode} per day.".\ format(n=int(np.sum(time.time() - times <= 30.0*86400.0)/30.0), mode=string)) plt.gca().tick_params(labelright=True) # Year lines for year in new_years: plt.axvline(year, color="r", alpha=0.8, linestyle="--") # MH line plt.axvline(890.0, linestyle="dotted", linewidth=2, color="g") # plt.gca().set_yticks([1.0, 10.0, 100.0, 1000.0, 10000.0]) # plt.gca().set_yticklabels(["1", "10", "100", "1000", "10000"]) plt.legend() plt.savefig("{mode}.svg".format(mode=mode), bbox_inches="tight") plt.savefig("{mode}.png".format(mode=mode), bbox_inches="tight", dpi=70) print("Figure saved to {mode}.svg and {mode}.png.".format(mode=mode)) if show: plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mode (self, mode) :\r\n self.mode_ = mode", "def set_mode(self, mode):\n\t\tif mode not in (self.MODE_PREVIEW, self.MODE_ANALYZE, self.MODE_SEND):\n\t\t\traise ValueError('mode must be one of the MODE_* constants')\n\t\tself._mode = mode\n\t\tif mode == self.MODE_ANALYZE:\n\t\t\tself.attachment_images =...
[ "0.60435784", "0.57577735", "0.55922663", "0.558323", "0.5573023", "0.554792", "0.55342835", "0.55269134", "0.55122584", "0.5491209", "0.5439935", "0.54331106", "0.5416338", "0.54091513", "0.5399344", "0.5399344", "0.5399344", "0.5391328", "0.53808737", "0.5337275", "0.533549...
0.0
-1
Calculate tips over past X amount of time and write JSON output
def aggregate_tips(): # The SQL query to perform now = time.time() print("Computing tip stats...", end="", flush=True) labels = ["30_days", "7_days", "24_hours", "1_hour"] windows = [30*86400.0, 7*86400.0, 1*86400.0, 3600.0] result = {} result["unix_time"] = now result["human_time_utc"] = str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC" # Agrees with old method, but should it be SUM(amount)? query = "SELECT support_id, amount, time, claim_name, claim_id, is_nsfw, SUM(to_claim_address) tot FROM (SELECT support.id as support_id, support.support_amount amount,\ transaction.transaction_time time, claim.is_nsfw is_nsfw,\ claim.claim_id claim_id, claim.name claim_name,\ (CASE WHEN (output.address_list LIKE CONCAT('%25', claim_address, '%25')) THEN '1' ELSE '0' END) to_claim_address\ FROM claim\ INNER JOIN support ON support.supported_claim_id = claim.claim_id\ INNER JOIN transaction ON support.transaction_hash_id = transaction.hash\ INNER JOIN output ON transaction.hash = output.transaction_hash \ WHERE transaction.transaction_time > ({now} - {window})\ AND transaction.transaction_time <= {now}) AS result\ GROUP BY support_id, amount;".format(now=now, window=windows[0]) request = requests.get("https://chainquery.lbry.com/api/sql?query=" + query) the_dict = request.json() # Get tips into numpy array times = [] tips = [] is_tip = [] links = [] is_nsfw = [] for row in the_dict["data"]: times.append(float(row["time"])) tips.append(float(row["amount"])) links.append("https://open.lbry.com/" + str(row["claim_name"]) + ":"\ + str(row["claim_id"])) is_nsfw.append(row["is_nsfw"]) if row["tot"] > 0: is_tip.append(True) else: is_tip.append(False) times = np.array(times) tips = np.array(tips) is_tip = np.array(is_tip) links = np.array(links) is_nsfw = np.array(is_nsfw) # Write tips for i in range(len(labels)): keep = (times > (now - windows[i])) & is_tip _times = times[keep] _tips = tips[keep] _links = links[keep] _is_nsfw = is_nsfw[keep] 
result["num_tips_{label}".format(label=labels[i])] = len(_tips) result["lbc_tipped_{label}".format(label=labels[i])] = float(_tips.sum()) maxtip = 0 maxtip_link = None maxtip_is_nsfw = None if len(_tips) > 0: maxtip = float(_tips.max()) index = np.argmax(_tips) maxtip_link = _links[index] maxtip_is_nsfw = _is_nsfw[index] result["biggest_tip_{label}".format(label=labels[i])] = maxtip result["biggest_tip_{label}_link".format(label=labels[i])] = maxtip_link result["biggest_tip_{label}_is_nsfw".format(label=labels[i])] = bool(maxtip_is_nsfw) # Write supports for i in range(len(labels)): keep = (times > (now - windows[i])) & (~is_tip) _times = times[keep] _tips = tips[keep] _links = links[keep] _is_nsfw = is_nsfw[keep] result["num_supports_{label}".format(label=labels[i])] = len(_tips) result["lbc_supports_{label}".format(label=labels[i])] = float(_tips.sum()) maxtip = 0 maxtip_link = None maxtip_is_nsfw = None if len(_tips) > 0: maxtip = float(_tips.max()) index = np.argmax(_tips) maxtip_link = _links[index] maxtip_is_nsfw = _is_nsfw[index] result["biggest_support_{label}".format(label=labels[i])] = maxtip result["biggest_support_{label}_link".format(label=labels[i])] = maxtip_link result["biggest_support_{label}_is_nsfw".format(label=labels[i])] = bool(maxtip_is_nsfw) f = open("tips_stats.json", "w") f.write(json.dumps(result)) f.close() print("done. ", flush=True, end="")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_tip(meal_base, tip_rate):", "def time_taken(json_cutlist, laser):\r\n\tcutlist = json.loads(json_cutlist)\r\n\ttime = 0\r\n\tcoordinate_array = [0, 0]\r\n\tfor a in cutlist:\r\n\t\tif a[0] == \"jump\" or a[0] == \"mark\":\r\n\t\t\tcoordinate_array = [float(a[1]) - coordinate_array[0], float(a[2]) -...
[ "0.5688149", "0.55947673", "0.5528684", "0.55030835", "0.5432563", "0.53793204", "0.5325283", "0.53094494", "0.52589875", "0.51901346", "0.51684177", "0.5168089", "0.5163765", "0.5146831", "0.511869", "0.5105191", "0.50967735", "0.5090349", "0.5079822", "0.5079822", "0.507982...
0.6560097
0
Publish files to somewhere on the internet.
def publish_files(): print("Publishing files to the internet...", end="", flush=True) import subprocess try: subprocess.run("./upload.sh", timeout=120.0) print("done.\n") except: print("failed.\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish(self, filename):\n # 1) Encrypt file\n # 2) Publish to remote cloud server\n # 3) Wait for the result\n # 4) Store results in files located inside RAM folder", "def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist')\n os.system...
[ "0.74543023", "0.6922994", "0.6866991", "0.676951", "0.642343", "0.6293285", "0.6157238", "0.6132221", "0.60680485", "0.59941494", "0.5980355", "0.5970285", "0.59354484", "0.5896326", "0.58699507", "0.5835118", "0.5805512", "0.58000696", "0.57821625", "0.5779796", "0.57461995...
0.77630585
0
create a database connection to a SQLite database
def create_connection(db_file): conn = None try: conn = sqlite3.connect(db_file) return conn except Error as e: print(e) return conn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect_db(self) -> sqlite3.Connection:", "def create_connection():\n dir_path = os.path.dirname(os.path.abspath(__file__))\n db_file = dir_path + '/py_sqlite.db'\n try:\n DbUtil.connection = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n f...
[ "0.8269675", "0.8154287", "0.8153167", "0.80855244", "0.8006094", "0.79054296", "0.7893724", "0.78821623", "0.7875023", "0.7869494", "0.7866777", "0.7866777", "0.7866777", "0.7866019", "0.78544617", "0.7850683", "0.78433925", "0.7838268", "0.78342485", "0.78307635", "0.782310...
0.77235234
53
Start a thread and consume messages there.
def async_consume(self, callback, auto_ack=False): logging.info("Async consume") if self.thread is not None: return self.thread_stop = False def wrapped_callback(ch, method, properties, body): #logging.info("Wrapped callback'd") callback(ch, method, properties, body) #if not self.thread_stop: # callback(ch, method, properties, body) #else: # print("Should stop now!") # callback(ch, method, properties, body) # self.channel.basic_cancel(self.tag) # exit self.thread = threading.Thread(target=self.consume, args=(wrapped_callback,), kwargs={"auto_ack":auto_ack}) self.thread.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_thread(self):\n self.thread = Thread(target=self.put_lines_into_queue)\n self.thread.daemon = True\n self.thread.start()", "def start(self):\n\n def pubsub_thread():\n \"\"\" Call get_message in loop to fire _handler. \"\"\"\n\n while not self._stop.is_...
[ "0.71411765", "0.6965287", "0.68307364", "0.6813745", "0.6759223", "0.66143614", "0.65682524", "0.6550619", "0.65052104", "0.64535457", "0.6452136", "0.6439701", "0.6390061", "0.63585454", "0.6282679", "0.6282162", "0.62618804", "0.6250061", "0.6245712", "0.6242766", "0.62239...
0.0
-1
Build the command word. Note
def get_cmd_word(cmd, d_width, d_length): word = 0x1 # cmd valid word = word | (d_width - 1) << 1 # cmd dataWidth (3->4B, 1->2B, 0->1B) word = word | cmd << 3 # cmd type (1->RD, 0->WR) word = word | d_length << 8 # cmd burst length (1->1 word) word = word | 0 << 16 # unused return word
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_command(self, cmd, unit):\n return '#' + unit + cmd + NEWLINE", "def _build_simple_command(self, cmd):\n return cmd+SBE37_NEWLINE", "def _build_direct_command(self, cmd, arg):\n return \"%s%s\" % (arg, self._newline)", "def _buildCmd(self, cmd, cmdArg=0x00):\n res = [cm...
[ "0.79212874", "0.744362", "0.7122005", "0.707842", "0.70561314", "0.69483036", "0.6882322", "0.6856377", "0.6743266", "0.67065525", "0.66842824", "0.6522843", "0.6477476", "0.64324766", "0.6427607", "0.6427607", "0.6427607", "0.64067274", "0.6403515", "0.63853884", "0.6316158...
0.0
-1
Return a new instance of a DevMode object.
def __init__(self, mb_info, switch_config): self.microblaze = Arduino(mb_info, ARDUINO_MAILBOX_PROGRAM) self.iop_switch_config = switch_config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _new_instance(self):\n return self.__class__(self._vmodule)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def debug(self):\n return Debug(self)", "def dev_mode(self):\r\n return...
[ "0.5734324", "0.55724305", "0.55724305", "0.5561178", "0.55437505", "0.55043155", "0.5407992", "0.53603274", "0.5352191", "0.53068537", "0.5200111", "0.5169847", "0.51558495", "0.51315564", "0.51260436", "0.5092145", "0.5049877", "0.5045362", "0.500854", "0.49994266", "0.4944...
0.0
-1
Start the Microblaze Processor. The processor instance will start automatically after instantiation.
def start(self): self.microblaze.run() self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, 0) self.load_switch_config(self.iop_switch_config)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startup(self):\n if self.initialize_mp:\n self.initialize_multiprocessing()\n self.startup_run()\n self.startup_finish()", "def start(params) -> None:\n check_root()\n start_microservice(params)\n load_kernel_module(params)\n start_streamer(params)", "def platfor...
[ "0.6218762", "0.60426116", "0.5927747", "0.5888535", "0.5852811", "0.58245885", "0.5803731", "0.57714486", "0.57650065", "0.5744394", "0.5729824", "0.5724727", "0.5718365", "0.5661167", "0.5647819", "0.5586553", "0.5540475", "0.553082", "0.551599", "0.5512633", "0.5512412", ...
0.6880082
0
Put the Microblaze processor into reset. This method will set processor status as "STOPPED".
def stop(self): self.microblaze.reset()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n # The camera will give no response to this command\n self._serial_io('\\x55\\x99\\x66\\x11', None)\n while True:\n try:\n self.system_state = 0x11\n if self.system_state == 0x11:\n break\n except CygnetEx...
[ "0.62783825", "0.6214647", "0.62101734", "0.62012935", "0.6188777", "0.6144242", "0.6094757", "0.60869604", "0.6043643", "0.6032759", "0.6026657", "0.6011027", "0.600109", "0.59982294", "0.5969956", "0.59564924", "0.5952704", "0.59244686", "0.5921813", "0.5914156", "0.5907323...
0.70397276
0
Load the Microblaze processor's switch configuration. This method will update switch config. Each pin requires 8 bits for configuration.
def load_switch_config(self, config=None): if config is None: config = ARDUINO_SWCFG_DIOALL elif not len(config) == 4*ARDUINO_SWITCHCONFIG_NUMREGS: raise TypeError('Invalid switch config {}.'.format(config)) # Build switch config word self.iop_switch_config = config sw_config_words = [0]*ARDUINO_SWITCHCONFIG_NUMREGS for ix, cfg in enumerate(self.iop_switch_config): if ix < 4: sw_config_words[0] |= (cfg << ix*8) elif ix < 8: sw_config_words[1] |= (cfg << (ix-4)*8) elif ix < 12: sw_config_words[2] |= (cfg << (ix-8)*4) elif ix < 16: sw_config_words[3] |= (cfg << (ix-12)*4) else: sw_config_words[4] |= (cfg << (ix-16)*4) # Configure switch for i in range(ARDUINO_SWITCHCONFIG_NUMREGS): self.write_cmd(ARDUINO_SWITCHCONFIG_BASEADDR + 4*i, sw_config_words[i])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict) -> \"SwitchPlatformInterface\":\n raise NotImplementedError", "def configure_switch(self, config):\n raise NotImplementedError", "def __init__(self, mb_info, switch_config):\n self.microblaze = Arduino(mb_...
[ "0.62282544", "0.5851503", "0.5757967", "0.5553404", "0.5420665", "0.5405235", "0.5385367", "0.53103375", "0.5305637", "0.52973616", "0.5270049", "0.52667636", "0.5264424", "0.52271485", "0.5182084", "0.51805335", "0.51612717", "0.515244", "0.5107933", "0.50897145", "0.507896...
0.67974967
0
Returns the status of the Microblaze processor. Returns str The processor status ("IDLE", "RUNNING", or "STOPPED").
def status(self): return self.microblaze.state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_status(self):\n return self.read_register(259, 0, 3)", "async def get_status(self) -> str:\n return await self.hw_device.status()", "def get_status(self):\n if self.vm.get_cloud_status() != \"ACTIVE\":\n return \"stopped\"\n #wait for the vm to be ready and SSH-ab...
[ "0.66442674", "0.65349734", "0.6357901", "0.61985266", "0.61985266", "0.6195143", "0.61590946", "0.6113441", "0.6109538", "0.6081476", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187"...
0.692374
0
Send a write command to the mailbox.
def write_cmd(self, address, data, d_width=4, d_length=1, timeout=10): # Write the address and data self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_ADDR_OFFSET, address) self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_DATA_OFFSET, data) # Build the write command cmd_word = get_cmd_word(WRITE_CMD, d_width, d_length) self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, cmd_word) # Wait for ACK in steps of 1ms countdown = timeout while not self.is_cmd_mailbox_idle() and countdown > 0: time.sleep(0.001) countdown -= 1 # If ACK is not received, alert users. if countdown == 0: raise RuntimeError("ArduinoDevMode write_cmd() not acknowledged.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, cmd):\n self._chan.send(\"{}\\n\".format(cmd))\n logger.debug(\"sent '{}'\".format(cmd))", "def write(self, cmd):\n if self.tn:\n try:\n self.wlock.acquire()\n if not isinstance(cmd, bytes): cmd = cmd.encode('ascii')\n s...
[ "0.6980138", "0.68226683", "0.6813809", "0.6756463", "0.66383994", "0.6596419", "0.6563577", "0.6360743", "0.6337061", "0.63311714", "0.6312822", "0.6311199", "0.62932336", "0.6277489", "0.6255314", "0.6235374", "0.62292373", "0.6213919", "0.62082815", "0.6193863", "0.6186107...
0.646782
7
Send a read command to the mailbox.
def read_cmd(self, address, d_width=4, d_length=1, timeout=10): # Write the address self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_ADDR_OFFSET, address) # Build the read command cmd_word = get_cmd_word(READ_CMD, d_width, d_length) self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, cmd_word) # Wait for ACK in steps of 1ms countdown = timeout while not self.is_cmd_mailbox_idle() and countdown > 0: time.sleep(0.001) countdown -= 1 # If ACK is not received, alert users. if countdown == 0: raise RuntimeError("ArduinoDevMode read_cmd() not acknowledged.") result = self.microblaze.read(MAILBOX_OFFSET + MAILBOX_PY2IOP_DATA_OFFSET) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(self):\n if self.status == 'read':\n return\n self.status = 'read'\n self.emit('read')\n self.emit('modified')", "def read(self, read):\n\n self._read = read", "def mifare_read(self,address):\n return self.in_data_exchange(bytearray([MIFARE_COMMAND_...
[ "0.6737285", "0.65308905", "0.6498505", "0.64222383", "0.63628715", "0.6303914", "0.62050015", "0.6119331", "0.6107073", "0.60777915", "0.6036015", "0.6027135", "0.6019354", "0.5988685", "0.5962931", "0.594067", "0.59229904", "0.5910504", "0.5862011", "0.58523464", "0.5848433...
0.6479234
3
Check whether the command mailbox is idle. Returns bool True if the command in the mailbox is idle.
def is_cmd_mailbox_idle(self): mb_cmd_word = self.microblaze.read(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET) return (mb_cmd_word & 0x1) == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def is_idle(self) -> bool:\n return (\n await self.send_command_and_read_reply(Protocol1Command(command=\"F\")) == \"Y\"\n )", "def is_idle(self) -> bool:\n\n return self.get_runningstate == self.cmd.C815_IDLE_STATE", "def is_idle(self) -> bool:", "def is_idle(self) -> b...
[ "0.7600817", "0.7516991", "0.7223411", "0.7089875", "0.6917131", "0.66209525", "0.6557727", "0.6422757", "0.6312891", "0.6253275", "0.62224126", "0.6158467", "0.6102417", "0.60107124", "0.5923812", "0.58288354", "0.58169097", "0.5816199", "0.5791001", "0.57627046", "0.5757289...
0.8497326
0
Encode truncated classical image into quantum datapoint.
def convert_to_circuit(x): y = np.arcsin(x) z = np.arccos(x**2) qubits = cirq.GridQubit.rect(5, 1) circuit = cirq.Circuit() for i in range(5): circuit.append(cirq.ry(y).on(qubits[i])) circuit.append(cirq.rz(z).on(qubits[i])) return circuit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(self, encode_data, image):\r\n raise NotImplementedError(\"Not Implemented\")", "def encode(self, signal: np.ndarray) -> np.ndarray:\n pass", "def encode_image(self, image):\n image = self.clip_preprocess(image).unsqueeze(0).to(self.device)\n image_features = self.clip_mo...
[ "0.5878991", "0.58486336", "0.5828887", "0.57244307", "0.56981575", "0.54445195", "0.5440825", "0.5357206", "0.53013325", "0.5262915", "0.5184929", "0.5019855", "0.5011705", "0.49735066", "0.49495837", "0.49373123", "0.49359372", "0.4922893", "0.49128926", "0.49051914", "0.48...
0.0
-1
Detects labels given a GCS path.
def main(path): video_client = (video_intelligence_service_client. VideoIntelligenceServiceClient()) features = [enums.Feature.LABEL_DETECTION] video_context = video_intelligence_pb2.VideoContext() video_context.stationary_camera = True video_context.label_detection_mode = video_intelligence_pb2.FRAME_MODE operation = video_client.annotate_video(path, features, video_context=video_context) print('\nProcessing video for label annotations:') while not operation.done(): sys.stdout.write('.') sys.stdout.flush() time.sleep(10) print('\nFinished processing.') results = operation.result().annotation_results[0] return(results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n response = client.label_detection(image=image)\n labels = response.label_annotations\n print('Labels:')\...
[ "0.75087816", "0.7491403", "0.7457117", "0.61335844", "0.6064078", "0.5983722", "0.58968204", "0.588334", "0.5836181", "0.58187306", "0.57689315", "0.57617164", "0.5745488", "0.5734802", "0.57158095", "0.5700895", "0.5700895", "0.5694585", "0.5681159", "0.5674216", "0.5672181...
0.0
-1
Downloads a blob from the bucket.
def download_blob(bucket_name, source_blob_name, destination_file_name): storage_client = storage.Client() try: bucket = storage_client.get_bucket(bucket_name) blob = bucket.blob(source_blob_name) blob.download_to_filename(destination_file_name) print('Blob {} downloaded to {}.'.format( source_blob_name, destination_file_name)) except: print("User does not have access to that bucket. Trying public link:") gcs_url = 'https://%(bucket)s.storage.googleapis.com/%(file)s' % {'bucket':bucket_name, 'file':source_blob_name} urllib.urlretrieve(gcs_url, destination_file_name) print ("Download complete")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_blob(bucket_name, source_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n return blob", "def blob_download(blob_url):\n blob = storage.Object.from_url(blob_url)\n blobc = blob.download()\n ...
[ "0.8135342", "0.8125911", "0.78897315", "0.7834108", "0.77213085", "0.77190185", "0.7650273", "0.7502587", "0.7473543", "0.7457528", "0.73454875", "0.7343923", "0.7278197", "0.72137046", "0.71286523", "0.708186", "0.70657223", "0.6937438", "0.6928341", "0.6895048", "0.6828916...
0.7573154
7
Computes the hamming distance for sequences in seqs_mat indicated by pairs of indices.
def nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths=True): return _nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hamming_distance(s1, s2):\n assert(len(s1) == len(s2))\n return np.sum([1 if c1 != c2 else 0 for c1, c2 in zip(s1, s2)])", "def hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal lenght.\")\n return sum(ch1 != ch2 for ch1, ch2 in zip(...
[ "0.63828456", "0.6093228", "0.6056598", "0.59507614", "0.5795671", "0.578761", "0.57800555", "0.574706", "0.5681204", "0.56678975", "0.56452966", "0.56424135", "0.563103", "0.558626", "0.55659384", "0.5539205", "0.5504324", "0.5493676", "0.5407647", "0.5401868", "0.53662705",...
0.6753979
0
Computes the Levenshtein edit distance between two sequences, with the AA substitution distances provided in distance_matrix. The default distance matrix has a 1 for mismatches and 0 for matches.
def nb_editdistance(seq_vec1, seq_vec2, distance_matrix=identity_nb_distance_matrix, gap_penalty=1): q_L = seq_vec1.shape[0] s_L = seq_vec2.shape[0] if q_L == s_L: """No gaps: substitution distance This will make it differ from a strict edit-distance since the optimal edit-distance may insert same number of gaps in both sequences""" dist = 0 for i in range(q_L): dist += distance_matrix[seq_vec1[i], seq_vec2[i]] return dist ldmat = np.zeros((q_L, s_L), dtype=np.int16) for row in range(1, q_L): ldmat[row, 0] = row * gap_penalty for col in range(1, s_L): ldmat[0, col] = col * gap_penalty for col in range(1, s_L): for row in range(1, q_L): ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty, ldmat[row, col-1] + gap_penalty, ldmat[row-1, col-1] + distance_matrix[seq_vec1[row-1], seq_vec2[col-1]]) # substitution return ldmat[row, col]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def word_embedding_levenshtein(seq1, seq2, embeddings, average_distance, r=0.9, normalise=False):\n\tx1 = 1 + len(seq1)\n\tx2 = 1 + len(seq2)\n\n\talpha = r / ((1 - r) * average_distance)\n\n\t# Initialisation of the matrix\n\td = [] # Using Numpy structures for this is probably not more efficient\n\td.append(list...
[ "0.6711283", "0.6683752", "0.6674949", "0.6592512", "0.6581954", "0.64532", "0.64532", "0.6445244", "0.6432772", "0.64265794", "0.63696915", "0.6358784", "0.62909234", "0.62251955", "0.6190301", "0.61244994", "0.6103599", "0.6082008", "0.60701114", "0.60523444", "0.60067487",...
0.6989441
0
Compute "tcrdist" distance between two TCR CDR3 sequences. Using default weight, gap penalty, ntrim and ctrim is equivalent to the original distance published in Dash et al, (2017). By setting ntrim and ctrim to 0 and adjusting the dist_weight, it is also possible to compute the CDR1/2 loop distances which can be combined with the CDR3 distance for overall distance. See tcrdist2 package for details.
def nb_tcrdist(seq_vec1, seq_vec2, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True): q_L = seq_vec1.shape[0] s_L = seq_vec2.shape[0] if q_L == s_L: """No gaps: substitution distance""" tmp_dist = 0 for i in range(ntrim, q_L - ctrim): tmp_dist += distance_matrix[seq_vec1[i], seq_vec2[i]] return tmp_dist * dist_weight short_len = min(q_L, s_L) len_diff = abs(q_L - s_L) if fixed_gappos: """If we are not aligning, use a fixed gap position relative to the start of the CDR3 that reflects the typically longer and more variable-length contributions to the CDR3 from the J than from the V. For a normal-length CDR3 this would be after the Cys+5 position (ie, gappos = 6; align 6 rsds on N-terminal side of CDR3). Use an earlier gappos if lenshort is less than 11.""" min_gappos = min(6, 3 + (short_len - 5) // 2) max_gappos = min_gappos else: """The CYS and the first G of the GXG are 'aligned' in the beta sheet the alignment seems to continue through roughly CYS+4 ie it's hard to see how we could have an 'insertion' within that region gappos=1 would be a insertion after CYS gappos=5 would be a insertion after CYS+4 (5 rsds before the gap) the full cdr3 ends at the position before the first G so gappos of len(shortseq)-1 would be gap right before the 'G' shifting this back by 4 would be analogous to what we do on the other strand, ie len(shortseq)-1-4""" min_gappos = 5 max_gappos = short_len - 1 - 4 while min_gappos > max_gappos: min_gappos -= 1 max_gappos += 1 min_dist = -1 # min_count = -1 for gappos in range(min_gappos, max_gappos + 1): tmp_dist = 0 # tmp_count = 0 remainder = short_len - gappos for n_i in range(ntrim, gappos): """n_i refers to position relative to N term""" # print (n_i, shortseq[i], longseq[i], distance_matrix[shortseq[i]+longseq[i]]) tmp_dist += distance_matrix[seq_vec1[n_i], seq_vec2[n_i]] # tmp_count += 1 #print('sequence_distance_with_gappos1:', gappos, remainder, dist[seq_i]) for c_i in 
range(ctrim, remainder): """c_i refers to position relative to C term, counting upwards from C term""" tmp_dist += distance_matrix[seq_vec1[q_L - 1 - c_i], seq_vec2[s_L - 1 - c_i]] # tmp_count += 1 #print('sequence_distance_with_gappos2:', gappos, remainder, dist[seq_i]) if tmp_dist < min_dist or min_dist == -1: min_dist = tmp_dist # min_count = tmp_count if min_dist == 0: break """Note that weight_cdr3_region is not applied to the gap penalty""" return min_dist * dist_weight + len_diff * gap_penalty
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n for ind_i in nb.prange(indices.shape[0]):\n ...
[ "0.53767455", "0.4826406", "0.47805288", "0.4753317", "0.47484082", "0.46879807", "0.46756828", "0.46334925", "0.45374662", "0.4479596", "0.44660226", "0.44558287", "0.44372138", "0.44101122", "0.43605304", "0.4340066", "0.43351212", "0.4289836", "0.4284168", "0.42829153", "0...
0.6212199
0
Computes the tcrdist distance for sequences in seqs_mat indicated by pairs of indices.
def nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True): return _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix, dist_weight, gap_penalty, ntrim, ctrim, fixed_gappos)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n for ind_i in nb.prange(indices.shape[0]):\n ...
[ "0.6867072", "0.6173414", "0.61204684", "0.59654254", "0.5629023", "0.5607086", "0.55902237", "0.55837727", "0.55610365", "0.5435831", "0.54259413", "0.5407338", "0.5377603", "0.5373048", "0.5354197", "0.5333928", "0.5265103", "0.52603585", "0.5210351", "0.5201209", "0.518305...
0.6814871
1
This function works OK on its own. Wrapping it with the above python function was a workaround because joblib and multiprocessing seem to have an issue retaining default arguments with numba functions.
def _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True): assert seqs_mat.shape[0] == seqs_L.shape[0] dist = np.zeros(indices.shape[0], dtype=np.int16) for ind_i in nb.prange(indices.shape[0]): query_i = indices[ind_i, 0] seq_i = indices[ind_i, 1] q_L = seqs_L[query_i] s_L = seqs_L[seq_i] if q_L == s_L: """No gaps: substitution distance""" for i in range(ntrim, q_L - ctrim): dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]] * dist_weight continue short_len = min(q_L, s_L) len_diff = abs(q_L - s_L) if fixed_gappos: min_gappos = min(6, 3 + (short_len - 5) // 2) max_gappos = min_gappos else: min_gappos = 5 max_gappos = short_len - 1 - 4 while min_gappos > max_gappos: min_gappos -= 1 max_gappos += 1 min_dist = -1 # min_count = -1 for gappos in range(min_gappos, max_gappos + 1): tmp_dist = 0 # tmp_count = 0 remainder = short_len - gappos for n_i in range(ntrim, gappos): """n_i refers to position relative to N term""" # print (n_i, shortseq[i], longseq[i], distance_matrix[shortseq[i]+longseq[i]]) tmp_dist += distance_matrix[seqs_mat[query_i, n_i], seqs_mat[seq_i, n_i]] # tmp_count += 1 #print('sequence_distance_with_gappos1:', gappos, remainder, dist[seq_i]) for c_i in range(ctrim, remainder): """c_i refers to position relative to C term, counting upwards from C term""" tmp_dist += distance_matrix[seqs_mat[query_i, q_L - 1 - c_i], seqs_mat[seq_i, s_L - 1 - c_i]] # tmp_count += 1 #print('sequence_distance_with_gappos2:', gappos, remainder, dist[seq_i]) if tmp_dist < min_dist or min_dist == -1: min_dist = tmp_dist # min_count = tmp_count if min_dist == 0: break dist[ind_i] = min_dist * dist_weight + len_diff * gap_penalty return dist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_default_args(self):\n cfunc = njit(g)\n self.assertEqual(cfunc(1, 2, 3), g(1, 2, 3))\n self.assertEqual(cfunc(1, y=2, z=3), g(1, 2, 3))", "def __call__(self, *args, **kwargs):\n if Numba.numba_flag:\n return self.numba_fn(*args, **kwargs)\n else:\n ...
[ "0.65683377", "0.61231744", "0.61210066", "0.58272725", "0.58008033", "0.57025033", "0.56774867", "0.565421", "0.5610815", "0.5579183", "0.5561648", "0.5541063", "0.552428", "0.54533285", "0.5434301", "0.5418918", "0.5369601", "0.5330608", "0.52911884", "0.5281962", "0.519551...
0.0
-1
Computes the Levenshtein edit distance for sequences in seqs_mat indicated by pairs of indices.
def nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1): #print(indices.shape) #print(seqs_mat.shape) #print(seqs_L.shape) return _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix, gap_penalty)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n mx_L = nb.int_(np.max(seqs_L))\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n \n \"\"\"As long as ldmat is big enough to accomo...
[ "0.7585912", "0.65193325", "0.6018155", "0.5973261", "0.5960617", "0.5927266", "0.5902716", "0.5879615", "0.5879615", "0.5809641", "0.57759446", "0.5760104", "0.57229745", "0.5714925", "0.5702276", "0.5691489", "0.5681923", "0.5659949", "0.5631824", "0.56016165", "0.55863863"...
0.7149528
1
This function works OK on its own. Wrapping it with the above python function was a workaround because joblib and multiprocessing seem to have an issue retaining default arguments with numba functions.
def _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1): assert seqs_mat.shape[0] == seqs_L.shape[0] mx_L = nb.int_(np.max(seqs_L)) dist = np.zeros(indices.shape[0], dtype=np.int16) """As long as ldmat is big enough to accomodate the largest sequence its OK to only use part of it for the smaller sequences NOTE that to create a 2D array it must be created 1D and reshaped""" ldmat = np.zeros(mx_L * mx_L, dtype=np.int16).reshape((mx_L, mx_L)) for ind_i in nb.prange(indices.shape[0]): query_i = indices[ind_i, 0] seq_i = indices[ind_i, 1] q_L = seqs_L[query_i] s_L = seqs_L[seq_i] if q_L == s_L: """No gaps: substitution distance This will make it differ from a strict edit-distance since the optimal edit-distance may insert same number of gaps in both sequences""" #tmp_dist = 0 for i in range(q_L): dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]] #dist[ind_i] = tmp_dist continue """Do not need to re-zero each time""" # ldmat = np.zeros((q_L, s_L), dtype=np.int16) for row in range(1, q_L): ldmat[row, 0] = row * gap_penalty for col in range(1, s_L): ldmat[0, col] = col * gap_penalty for col in range(1, s_L): for row in range(1, q_L): ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty, ldmat[row, col-1] + gap_penalty, ldmat[row-1, col-1] + distance_matrix[seqs_mat[query_i, row-1], seqs_mat[seq_i, col-1]]) # substitution dist[ind_i] = ldmat[row, col] return dist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_default_args(self):\n cfunc = njit(g)\n self.assertEqual(cfunc(1, 2, 3), g(1, 2, 3))\n self.assertEqual(cfunc(1, y=2, z=3), g(1, 2, 3))", "def __call__(self, *args, **kwargs):\n if Numba.numba_flag:\n return self.numba_fn(*args, **kwargs)\n else:\n ...
[ "0.65683377", "0.61231744", "0.61210066", "0.58272725", "0.58008033", "0.57025033", "0.56774867", "0.565421", "0.5610815", "0.5579183", "0.5561648", "0.5541063", "0.552428", "0.54533285", "0.5434301", "0.5418918", "0.5369601", "0.5330608", "0.52911884", "0.5281962", "0.519551...
0.0
-1
Computes sequence similarity based on the substitution matrix. Requires that sequences are prealigned and equal length. Operates on strings and a dict substitution matrix
def _nb_subst_metric(seq1, seq2, subst_dict, as_similarity=False): assert len(seq1) == len(seq2) def _sim_func(s1, s2, subst): sim12 = 0. for i in range(len(s1)): k1 = s1[i] + '|' + s2[i] k2 = s2[i] + '|' + s1[i] sim12 += subst.get(k1, subst.get(k2, subst['n|a'])) return sim12 """Site-wise similarity between seq1 and seq2 using the substitution matrix subst""" sim12 = _sim_func(seq1, seq2, subst_dict) if as_similarity: return sim12 else: L = len(seq1) sim11 = _sim_func(seq1, seq1, subst_dict) sim22 = _sim_func(seq2, seq2, subst_dict) D = sim11 + sim22 - 2 * sim12 return D
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ComputeDistMatrix(dict_alignedSequences):\r\n \r\n # check if dictionary with keys as tuples containing integers and values as tuples containing strings\r\n check = True \r\n #1 Check Input is dict\r\n if isinstance(dict_alignedSequences, dict) == False:\r\n check = False\r\n \r\n ...
[ "0.69739157", "0.647544", "0.6389687", "0.635343", "0.63365984", "0.6257071", "0.62165403", "0.6069753", "0.59501123", "0.5907441", "0.5907441", "0.589913", "0.5876702", "0.5853206", "0.5807996", "0.5804335", "0.5802121", "0.58003086", "0.5793083", "0.5780481", "0.57682025", ...
0.6120387
7
Store the names and grades of school students.
def __init__(self): self.students = {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_student(self, name: str, grade: int) -> None:\n school_grade = self.students.setdefault(grade, [])\n school_grade.append(name)\n school_grade.sort()", "def __init__(self):\n self.students = []\n self.grades = {}\n self.isSorted = True", "def add_student():\n\n\...
[ "0.70357305", "0.61918634", "0.6180321", "0.61655444", "0.61187875", "0.6078942", "0.6064046", "0.60455054", "0.60356414", "0.60095865", "0.59675676", "0.5928267", "0.5887183", "0.5879371", "0.5865206", "0.58481586", "0.58402777", "0.5822257", "0.5813019", "0.5812019", "0.580...
0.6250706
1
Add a student to a grade in the roster.
def add_student(self, name: str, grade: int) -> None: school_grade = self.students.setdefault(grade, []) school_grade.append(name) school_grade.sort()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_grade(self, student, grade):\n try:\n self.grades[student.id].append(grade)\n except KeyError:\n raise ValueError('Student not in Grade Book.')", "def addGrade(self, student, grade):\n try:\n self.grades[student.getIDNumber()].append(grade)\n e...
[ "0.812951", "0.7889351", "0.7747675", "0.75414544", "0.7533531", "0.74755746", "0.74255633", "0.7281956", "0.7157385", "0.70832974", "0.70647204", "0.7050774", "0.7042668", "0.70276797", "0.68648064", "0.67618066", "0.6695062", "0.661738", "0.6594988", "0.65615714", "0.653173...
0.7927675
1
Find all students in the school regardless of grade.
def roster(self) -> list: return [student for grade in sorted(self.students) for student in self.students[grade]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def students(self):\n\t\treturn self.grade_set.all().distinct()", "def get_all_failing_students(self):\n students = MyCollection()\n for student in self.__student_repository.get_all():\n failing = False\n for grade in self.__grade_repository.get_all():\n if \".\...
[ "0.76901233", "0.66902846", "0.65749806", "0.6472811", "0.64696026", "0.64033043", "0.63738966", "0.6277681", "0.62386876", "0.6223855", "0.6206561", "0.6142386", "0.61358625", "0.6032616", "0.6021176", "0.6009096", "0.5954611", "0.59262055", "0.59205323", "0.58794457", "0.58...
0.5828101
22
Find all students in a particular grade.
def grade(self, grade_number: int): return self.students.setdefault(grade_number, [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def students(self):\n\t\treturn self.grade_set.all().distinct()", "def find_students(self):\n from quizzer.models.attendance import Attendance\n from quizzer.models.semester import Semester\n\n semester = Semester.get_current()\n\n for attendance in Attendance.objects: # TODO: Use in...
[ "0.772303", "0.6642984", "0.6595034", "0.6356183", "0.6333465", "0.62915266", "0.6277043", "0.62721986", "0.6257745", "0.6209728", "0.62009317", "0.61742765", "0.6173197", "0.61293", "0.6112251", "0.6028895", "0.60229445", "0.5965616", "0.5958776", "0.5954945", "0.58713627", ...
0.64473695
3
If username and password are not provided, https request will not send authentication headers
def __init__(self, server, ssl=True, username=None, password=None): self.server = server authstring = 'Basic ' + string.strip(base64.encodestring(username + ':' + password)) self.auth = authstring if username and password else None self.connection_method = httplib.HTTPSConnection if ssl else httplib.HTTPConnection
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def basic_authentication(self, username: str, password: str) -> None:\n self.api_session.auth = (username, password)", "def authenticate():\n return Response(\n 'Could not verify your credentials for that url', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate()...
[ "0.68551093", "0.6853429", "0.6646717", "0.66217005", "0.6620933", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", ...
0.5987323
83
Ensures that the path starts with a '/'
def ensure_path(self, page): return page if page.startswith('/') else "/{0}".format(page)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_short_path(short_path):", "def test_normalize_path(self):\n self.assertEqual(normalize_path(\"//////\"), \"/\")\n self.assertEqual(normalize_path(\"//\"), \"/\")\n self.assertEqual(normalize_path(\"//foo/bar//baz\"), \"/foo/bar/baz\")\n self.assertEqual(normalize_path(\"/...
[ "0.7186039", "0.71806985", "0.7127312", "0.71078247", "0.7051519", "0.69835657", "0.68522096", "0.6767365", "0.66817963", "0.6630477", "0.6626429", "0.6597407", "0.65462995", "0.64854527", "0.64659435", "0.64395905", "0.6419547", "0.63963103", "0.63918716", "0.63726133", "0.6...
0.7040683
5
serializes the python object into json string
def to_json(self, json_file): try: json.dump(self.container, open(json_file, 'w'), indent=4) except (FileNotFoundError, IOError) as err: print(err)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize(self, obj):\n return json.dumps(obj)", "def toJSON(cls, obj):\n return json.dumps(obj)", "def toJSON(object):\n\treturn json.dumps(object, ensure_ascii=False)", "def serialize(cls, obj):\n return json.dumps(obj, cls=CustomTypeEncoder)", "def encode_json(obj):\n\treturn js...
[ "0.84162724", "0.82060945", "0.7896136", "0.7808678", "0.7746026", "0.7719003", "0.769862", "0.7666531", "0.7664871", "0.76284343", "0.76284343", "0.76284343", "0.76284343", "0.76254743", "0.7620307", "0.7614118", "0.7614118", "0.76006883", "0.7582615", "0.754739", "0.7535313...
0.0
-1
This function will watch the given file for any updates.
def watch(log_file): log_file.seek(0, os.SEEK_END) while True: line = LogParser.read_line(log_file) if not line: time.sleep(1) continue yield line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def statusupdate(filepath):\n pass", "def watch(self):", "def watch_changes(directories):\n default_watcher.watch(directories)\n return default_watcher.get_notification_thread()", "def _file_watcher(self, filename, interval):\n fp = open(filename)\n\n line = \"\"\n while self._w...
[ "0.7057741", "0.68224883", "0.6613068", "0.6612686", "0.65972984", "0.6454981", "0.6450486", "0.6387383", "0.6343412", "0.62934804", "0.6246352", "0.6237782", "0.6216912", "0.6172237", "0.6156002", "0.615293", "0.615024", "0.61491203", "0.61296344", "0.60973674", "0.6014658",...
0.5519304
41
Get CPF data for a given date The ephemeris data is stored in a dictionary with tables of times and position under the "positions" key. This table is interpolated in the calculate_initial_values method.
def get_ephemeris(rundate, sat_name): file_key = "slr_ephemeris" ephemeris_data = get_satellite_vars(sat_name) provider_list = config.tech.prediction_providers.list # Find the latest version of the observation file versions = config.files.glob_variable(file_key, "version", r"\d+", file_vars=ephemeris_data) try: ephemeris_data["version"] = sorted(versions)[-1] providers = config.files.glob_variable(file_key, "provider", r"\w+", file_vars=ephemeris_data) for provider in provider_list: if provider in providers: ephemeris_data["provider"] = provider break else: log.fatal(f"No valid provider found: {', '.join(providers)}") except IndexError: log.info("No ephemeris data found") log.info(f"Download manually from https://cddis.nasa.gov/archive/slr/cpf_predicts/{rundate.year}/{sat_name}") log.fatal(f"Please save missing file as '{config.files.path(file_key)}' !") eph_parser = parsers.parse_key(file_key, file_vars=ephemeris_data) eph = calculate_initial_values(eph_parser.as_dict(), rundate) return eph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_initial_values(eph, rundate):\n data = sorted(eph[\"positions\"].items())\n pos_itrs = np.zeros((len(data), 3))\n mjd1, mjd2 = zip(*[t for t, d in data])\n rotation_mat = rotation.trs2gcrs(time.Time(val=mjd1, val2=mjd2, fmt=\"mjd\", scale=\"utc\"))\n tbl = time.Time(val=mjd1, val2=mjd2...
[ "0.6184481", "0.5654636", "0.55653894", "0.55446994", "0.5504313", "0.5501529", "0.5354049", "0.5346375", "0.5329468", "0.5328793", "0.52968925", "0.52822536", "0.5271903", "0.5270245", "0.5265283", "0.526407", "0.52583337", "0.5231366", "0.5223594", "0.5191087", "0.5191087",...
0.5595784
2
Computing initial values for position and velocity in GCRS system This is for later use in orbit integration, from tables in the prediction files. Use a lagrange polynomial in order to interpolate in the tables.
def calculate_initial_values(eph, rundate): data = sorted(eph["positions"].items()) pos_itrs = np.zeros((len(data), 3)) mjd1, mjd2 = zip(*[t for t, d in data]) rotation_mat = rotation.trs2gcrs(time.Time(val=mjd1, val2=mjd2, fmt="mjd", scale="utc")) tbl = time.Time(val=mjd1, val2=mjd2, fmt="mjd", scale="utc") for i in range(0, len(data)): pos_itrs[i] = data[i][1]["pos"] diffsec = np.array([(t - rundate).total_seconds() for t in tbl.utc.datetime]) # Table given in ITRF coordinate system. Convert to GCRS, where the integration of the satellite orbit will # be done pos_gcrs = np.sum(rotation_mat @ pos_itrs[:, :, None], axis=2) log.info("Interpolating data from prediction file in order to get initial pos/vel") pos_gcrs_ip, vel_gcrs_ip = interpolation.interpolate_with_derivative( diffsec, pos_gcrs, np.array([0.0]), kind="lagrange", window=10, bounds_error=False ) eph["initial_pos"] = pos_gcrs_ip[0] eph["initial_vel"] = vel_gcrs_ip[0] return eph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(self):\n self.positions = self._generate_initial_positions()\n self.scores = np.array(self.compute_scores(self.positions))\n\n self._pso_data.best_positions = self.positions\n self._pso_data.best_scores = self.scores\n\n magic_constant = 2 # feel free to change FI...
[ "0.6120319", "0.6116722", "0.5932199", "0.5788035", "0.57826406", "0.57638705", "0.5757198", "0.5753652", "0.57429856", "0.57152313", "0.57009196", "0.5668376", "0.5657558", "0.5629421", "0.5624367", "0.5592595", "0.5591798", "0.5582761", "0.5570598", "0.55646664", "0.5564146...
0.6997615
0
Do the initialization and setup for building a postage stamp. In the base class, we check for and parse the appropriate size and position values in config (aka base['stamp'] or base['image']. Values given in base['stamp'] take precedence if these are given in both places (which would be confusing, so probably shouldn't do that, but there might be a use case where it would make sense). config The configuration dict for the stamp field. base The base configuration dict. xsize The xsize of the image to build (if known). ysize The ysize of the image to build (if known). ignore A list of parameters that are allowed to be in config that we can ignore here. i.e. it won't be an error if these parameters are present. logger If given, a logger object to log progress. xsize, ysize, image_pos, world_pos
def setup(self, config, base, xsize, ysize, ignore, logger):
    """Validate the 'bandpass' entry of the stamp config, then defer to the base setup.

    Requires config['bandpass'] to contain exactly the keys in
    self._req_bp_fields, optionally plus those in self._opt_bp_fields;
    any other key, or a missing required key, raises ValueError.  The
    'bandpass' key is then added to the ignore list so the base class
    does not treat it as unknown.

    Returns whatever the base class setup returns
    (xsize, ysize, image_pos, world_pos per the stamp-builder contract).
    """
    # .. Do any custom setup you need to do.

    # Probably want to call the base class setup function to do the normal determination
    # of the size and position values.

    # Extra processing of 'bandpass' argument
    # Most needed type-checking is done in galsim.bandpass
    self._req_bp_fields = ['throughput', 'wave_type']
    self._opt_bp_fields = ['red_limit', 'blue_limit', 'zeropoint']
    try:
        bp = config['bandpass']
        for req in self._req_bp_fields:
            if req not in bp.keys():
                raise ValueError('Must pass field {} for a bandpass object!'.format(req))
        # for opt in self._opt_bp_fields:
        #     if opt not in bp.keys():
        #         config['bandpass'][opt] = None
        for key in bp.keys():
            if key not in (self._req_bp_fields+self._opt_bp_fields):
                raise ValueError('Field {} is not a valid entry for a bandpass!'.format(key))
    except KeyError:
        # config['bandpass'] missing entirely.
        raise KeyError('`bandpass` is a required field for a COSMOSChromatic stamp!')

    extra_ignore = ignore + ['bandpass']
    # NOTE(review): super(self.__class__, self) recurses infinitely if this class
    # is ever subclassed — consider naming the class explicitly; verify class name.
    return super(self.__class__, self).setup(config, base, xsize, ysize, extra_ignore, logger)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self, config, base, file_num, logger):\n # This is a copy of the base class code\n seed = galsim.config.SetupConfigRNG(base, logger=logger)\n logger.debug('file %d: seed = %d',file_num,seed)\n\n if 'det_num' not in config:\n config['det_num'] = { 'type': 'Sequence',...
[ "0.5815659", "0.5406954", "0.53990644", "0.53855544", "0.5383662", "0.5338992", "0.5320965", "0.5303831", "0.5302118", "0.5297782", "0.52322584", "0.51910156", "0.51902056", "0.5185881", "0.5184995", "0.5149911", "0.5142809", "0.51350987", "0.51125443", "0.5085775", "0.508431...
0.64838976
0
Before drawing the profile, see whether this object can be trivially skipped. The base method checks if the object is completely off the main image, so the intersection bounds will be undefined. In this case, don't bother drawing the postage stamp for this object. prof The profile to draw. image The image onto which to draw the profile (which may be None). method The method to use in drawImage. offset The offset to apply when drawing. config The configuration dict for the stamp field. base The base configuration dict. logger If given, a logger object to log progress. whether to skip drawing this object.
def updateSkip(self, prof, image, method, offset, config, base, logger):
    """Return True if drawing this object can be skipped.

    Checks whether the postage stamp for ``prof`` would fall entirely
    outside base['current_image']; if so, logs and returns True so the
    caller skips drawing.  Returns False otherwise.

    NOTE(review): this is Python 2 code (print statements) and the
    ChromaticObject branch is unfinished — it still contains a
    pudb.set_trace() debugger breakpoint and diagnostic prints that must
    not ship to production.
    """
    # NOTE: There are currently unresolved issues with the image size checking of chromatic
    # objects. For now, we ignore any possible speed increases and skip the check.
    # if isinstance(prof, galsim.ChromaticObject):
    #     return False

    if prof is not None and base.get('current_image',None) is not None:
        if image is None:
            # No stamp image yet: estimate its bounds from the profile size.
            prof = base['wcs'].toImage(prof, image_pos=base['image_pos'])
            # NOTE: Old version:
            # N = prof.getGoodImageSize(1.)
            if isinstance(prof, galsim.GSObject):
                N = prof.getGoodImageSize(1.)
            elif isinstance(prof, galsim.ChromaticObject):
                # TODO: Finish implementation
                # return False
                pudb.set_trace()
                # Find the suggested image size for each object given the choice of scale, and use the
                # maximum just to be safe.
                print '\nprof.original = {}'.format(prof.original)
                print '\nprof.original.obj_list = {}'.format(prof.original.obj_list)
                # print '\nprof.objlist = {}'.format(prof.original.obj_list)
                obj_list = prof.original.obj_list
                possible_im_sizes = []
                for obj in obj_list:
                    print '\n obj : {}'.format(obj)
                    possible_im_sizes.append([ ob.getGoodImageSize(1.) for ob in obj])
                print 'possible_im_sizes : {}'.format(possible_im_sizes)
                N = np.max(possible_im_sizes)
            # Pad for sub-pixel offsets.
            N += 2 + int(np.abs(offset.x) + np.abs(offset.y))
            bounds = galsim._BoundsI(1,N,1,N)
        else:
            bounds = image.bounds

        # Set the origin appropriately
        stamp_center = base['stamp_center']
        if stamp_center:
            bounds = bounds.shift(stamp_center - bounds.center)
        else:
            bounds = bounds.shift(base.get('image_origin',galsim.PositionI(1,1)) -
                                  galsim.PositionI(bounds.xmin, bounds.ymin))

        # If the stamp does not intersect the main image at all, skip it.
        overlap = bounds & base['current_image'].bounds
        if not overlap.isDefined():
            logger.info('obj %d: skip drawing object because its image will be entirely off '
                        'the main image.', base['obj_num'])
            return True

    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self, prof, image, method, offset, config, base, logger, **kwargs):\n # ... draw prof onto the given image (making a new Image if necessary)\n if prof is None:\n return image\n else:\n logger = galsim.config.LoggerWrapper(logger)\n # Setup the kwargs t...
[ "0.6533734", "0.51599497", "0.47828445", "0.47232333", "0.45972005", "0.45731962", "0.45694324", "0.45572576", "0.45465982", "0.45340365", "0.44940332", "0.44605252", "0.44413173", "0.44082165", "0.4281743", "0.4263862", "0.42636248", "0.42611814", "0.42450166", "0.42342076", ...
0.67846763
0
Draw the profile on the postage stamp image. This is a slightly modified version of `stamp.DrawBasic()` which allows drawing of chromatic objects. prof The profile to draw. image The image onto which to draw the profile (which may be None). method The method to use in drawImage. offset The offset to apply when drawing. config The configuration dict for the stamp field. base The base configuration dict. logger If given, a logger object to log progress. the resulting image
def draw(self, prof, image, method, offset, config, base, logger, **kwargs):
    """Draw ``prof`` onto the postage stamp image and return the image.

    A modified copy of galsim's ``stamp.DrawBasic`` that additionally
    handles ChromaticObject profiles by constructing a Bandpass from
    config['bandpass'] (fields validated earlier in ``setup``) and
    passing it to ``drawImage``.  If ``prof`` is None the image is
    returned unchanged.
    """
    # ... draw prof onto the given image (making a new Image if necessary)
    if prof is None:
        return image
    else:
        logger = galsim.config.LoggerWrapper(logger)
        # Setup the kwargs to pass to drawImage
        # (Start with any additional kwargs given as extra kwargs to DrawBasic and add to it.)
        kwargs['image'] = image
        kwargs['offset'] = offset
        kwargs['method'] = method
        if 'wmult' in config and 'wmult' not in kwargs:  # pragma: no cover
            kwargs['wmult'] = galsim.config.ParseValue(config, 'wmult', base, float)[0]
        if 'wcs' not in kwargs and 'scale' not in kwargs:
            kwargs['wcs'] = base['wcs'].local(image_pos = base['image_pos'])
        if method == 'phot' and 'rng' not in kwargs:
            kwargs['rng'] = galsim.config.GetRNG(config, base, logger, "method='phot'")

        # Check validity of extra phot options:
        # n_photons, max_extra_noise and poisson_flux are only meaningful for
        # photon shooting, so raise if they appear with another method.
        max_extra_noise = None
        if 'n_photons' in config and 'n_photons' not in kwargs:
            if method != 'phot':
                raise AttributeError('n_photons is invalid with method != phot')
            if 'max_extra_noise' in config:
                # n_photons wins when both are given.
                logger.warning(
                    "Both 'max_extra_noise' and 'n_photons' are set in config dict, "+
                    "ignoring 'max_extra_noise'.")
            kwargs['n_photons'] = galsim.config.ParseValue(config, 'n_photons', base, int)[0]
        elif 'max_extra_noise' in config:
            max_extra_noise = galsim.config.ParseValue(config, 'max_extra_noise', base, float)[0]
            if method != 'phot' and max_extra_noise is not None:
                raise AttributeError('max_extra_noise is invalid with method != phot')

        if 'poisson_flux' in config and 'poisson_flux' not in kwargs:
            if method != 'phot':
                raise AttributeError('poisson_flux is invalid with method != phot')
            kwargs['poisson_flux'] = galsim.config.ParseValue(config, 'poisson_flux', base, bool)[0]

        if max_extra_noise is not None and 'max_extra_noise' not in kwargs:
            if max_extra_noise < 0.:
                raise ValueError("image.max_extra_noise cannot be negative")
            # max_extra_noise is specified as a fraction of the image noise variance.
            if 'image' in base and 'noise' in base['image']:
                noise_var = galsim.config.CalculateNoiseVariance(base)
            else:
                raise AttributeError("Need to specify noise level when using max_extra_noise")
            if noise_var < 0.:
                raise ValueError("noise_var calculated to be < 0.")
            max_extra_noise *= noise_var
            kwargs['max_extra_noise'] = max_extra_noise

        if logger.isEnabledFor(logging.DEBUG):
            # Don't output the full image array. Use str(image) for that kwarg.
            alt_kwargs = dict([(k,str(kwargs[k]) if isinstance(kwargs[k],galsim.Image) else kwargs[k])
                               for k in kwargs])
            logger.debug('obj %d: drawImage kwargs = %s',base.get('obj_num',0), alt_kwargs)
            logger.debug('obj %d: prof = %s',base.get('obj_num',0),prof)
        try:
            # NOTE: Old version:
            # image = prof.drawImage(**kwargs)
            if isinstance(prof, galsim.GSObject):
                # Achromatic profile: the standard draw path.
                image = prof.drawImage(**kwargs)
            elif isinstance(prof, galsim.ChromaticObject):
                # Chromatic profile: build the Bandpass from the config entry.
                # Optional fields not present in the config default to None.
                bp = {}
                for key in (self._req_bp_fields+self._opt_bp_fields):
                    try:
                        bp[key] = config['bandpass'][key]
                    except KeyError:
                        bp[key] = None
                bandpass = galsim.Bandpass(blue_limit=bp['blue_limit'], red_limit=bp['red_limit'],
                                           wave_type=bp['wave_type'], throughput=bp['throughput'],
                                           zeropoint=bp['zeropoint'])
                image = prof.drawImage(bandpass=bandpass, **kwargs)
        except Exception as e:  # pragma: no cover
            # Log the offending profile before propagating the error.
            logger.debug('obj %d: prof = %r', base.get('obj_num',0), prof)
            raise
    return image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateSkip(self, prof, image, method, offset, config, base, logger):\n\n # NOTE: There are currently unresolved issues with the image size checking of chromatic\n # objects. For now, we ignore any possible speed increases and skip the check.\n # if isinstance(prof, galsim.ChromaticObject):...
[ "0.56249416", "0.49046072", "0.4773243", "0.4728293", "0.47255272", "0.46967715", "0.460233", "0.45648476", "0.45566934", "0.45022318", "0.44824857", "0.44786713", "0.44598132", "0.4454975", "0.44341892", "0.4383001", "0.43778774", "0.43481213", "0.43244997", "0.43213403", "0...
0.70576626
0
Take a draft_dict that was already validated by draft_dict_validator then further sanitize, validate, and transform it. Ultimately return this "further validated" draft dict. It will have a slightly different set of keys the values for which can be used to directly create a Draft object.
def further_validated_draft_dict(
    draft_dict: Dict[str, Any], user_profile: UserProfile
) -> Dict[str, Any]:
    """Take a draft_dict that was already validated by draft_dict_validator then
    further sanitize, validate, and transform it. Ultimately return this "further
    validated" draft dict. It will have a slightly different set of keys the values
    for which can be used to directly create a Draft object.

    Raises JsonableError for semantic problems (negative timestamp, null bytes
    in topic, wrong number of stream recipients, invalid private recipients).
    """
    content = normalize_body(draft_dict["content"])

    # Default to "now", rounded to microsecond precision like message timestamps.
    timestamp = draft_dict.get("timestamp", time.time())
    timestamp = round(timestamp, 6)
    if timestamp < 0:
        # While it's not exactly an invalid timestamp, it's not something
        # we want to allow either.
        raise JsonableError(_("Timestamp must not be negative."))
    last_edit_time = timestamp_to_datetime(timestamp)

    topic = ""
    recipient_id = None
    to = draft_dict["to"]
    if draft_dict["type"] == "stream":
        topic = truncate_topic(draft_dict["topic"])
        if "\0" in topic:
            raise JsonableError(_("Topic must not contain null bytes"))
        if len(to) != 1:
            raise JsonableError(_("Must specify exactly 1 stream ID for stream messages"))
        # Also enforces that the user may access the stream.
        stream, sub = access_stream_by_id(user_profile, to[0])
        recipient_id = stream.recipient_id
    elif draft_dict["type"] == "private" and len(to) != 0:
        to_users = get_user_profiles_by_ids(set(to), user_profile.realm)
        try:
            recipient_id = recipient_for_user_profiles(to_users, False, None, user_profile).id
        except ValidationError as e:  # nocoverage
            raise JsonableError(e.messages[0])

    # Keys chosen to map directly onto Draft model fields.
    return {
        "recipient_id": recipient_id,
        "topic": topic,
        "content": content,
        "last_edit_time": last_edit_time,
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _draft_from_response(data):\n return Draft(\n uuid=UUID(data['uuid']),\n bundle_uuid=UUID(data['bundle_uuid']),\n name=data['name'],\n updated_at=dateutil.parser.parse(data['staged_draft']['updated_at']),\n files={\n path: DraftFile(path=path, **file)\n ...
[ "0.56550765", "0.56206477", "0.5450413", "0.52529806", "0.52475804", "0.5194902", "0.51102144", "0.51091665", "0.50821674", "0.5033902", "0.5002", "0.49568045", "0.4842201", "0.48251075", "0.48187992", "0.48105076", "0.48069793", "0.47712836", "0.47657195", "0.47256124", "0.4...
0.7376656
0
Create drafts in bulk for a given user based on the draft dicts. Since currently, the only place this method is being used (apart from tests) is from the create_draft view, we assume that the drafts_dicts are syntactically valid (i.e. they satisfy the draft_dict_validator).
def do_create_drafts(draft_dicts: List[Dict[str, Any]], user_profile: UserProfile) -> List[Draft]:
    """Bulk-create drafts for a user from syntactically valid draft dicts.

    Each dict is first run through further_validated_draft_dict for semantic
    validation; a single bulk INSERT then creates all Draft rows, and an
    "add" drafts event is sent to the user's clients.  Returns the created
    Draft objects.
    """
    validated_dicts = [
        further_validated_draft_dict(draft_dict, user_profile) for draft_dict in draft_dicts
    ]
    draft_objects = [
        Draft(
            user_profile=user_profile,
            recipient_id=valid["recipient_id"],
            topic=valid["topic"],
            content=valid["content"],
            last_edit_time=valid["last_edit_time"],
        )
        for valid in validated_dicts
    ]

    created_draft_objects = Draft.objects.bulk_create(draft_objects)

    event = {
        "type": "drafts",
        "op": "add",
        "drafts": [draft.to_dict() for draft in created_draft_objects],
    }
    send_event(user_profile.realm, event, [user_profile.id])
    return created_draft_objects
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_draft(auth, subject, body, addresses, user_id, cc_addresses=[], attachments_list=None):\r\n data = {}\r\n data['Subject'] = subject\r\n data['Body'] = {}\r\n data['Body']['ContentType'] = 'HTML'\r\n data['Body']['Content'] = body\r\n data['ToRecipients'] = [{'EmailAddress': {'Address':...
[ "0.56473714", "0.55276287", "0.5469983", "0.5152256", "0.5134393", "0.5032918", "0.4999463", "0.49490035", "0.49176887", "0.49063164", "0.48992178", "0.48911917", "0.48814285", "0.48810473", "0.48643064", "0.48627475", "0.48573655", "0.48487023", "0.48398086", "0.48395732", "...
0.7956114
0
Edit/update a single draft for a given user. Since the only place this method is being used from (apart from tests) is the edit_draft view, we assume that the drafts_dict is syntactically valid (i.e. it satisfies the draft_dict_validator).
def do_edit_draft(draft_id: int, draft_dict: Dict[str, Any], user_profile: UserProfile) -> None:
    """Update one of the user's drafts in place.

    The draft_dict is assumed syntactically valid (draft_dict_validator);
    semantic validation happens in further_validated_draft_dict.  Raises
    ResourceNotFoundError if no such draft belongs to this user.  Sends an
    "update" drafts event to the user's clients on success.
    """
    try:
        draft = Draft.objects.get(id=draft_id, user_profile=user_profile)
    except Draft.DoesNotExist:
        raise ResourceNotFoundError(_("Draft does not exist"))

    valid = further_validated_draft_dict(draft_dict, user_profile)
    draft.content = valid["content"]
    draft.topic = valid["topic"]
    draft.recipient_id = valid["recipient_id"]
    draft.last_edit_time = valid["last_edit_time"]
    draft.save()

    event = {"type": "drafts", "op": "update", "draft": draft.to_dict()}
    send_event(user_profile.realm, event, [user_profile.id])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_draft(self):\r\n EmptyPromise(\r\n lambda: self.q(css='.create-draft').present,\r\n 'Wait for edit draft link to be present'\r\n ).fulfill()\r\n\r\n self.q(css='.create-draft').first.click()\r\n\r\n EmptyPromise(\r\n lambda: self.q(css='.editing...
[ "0.63595945", "0.6280873", "0.623346", "0.62005955", "0.618698", "0.61712956", "0.616757", "0.6136594", "0.6136562", "0.61018544", "0.60939956", "0.60843796", "0.5883826", "0.58486557", "0.5807267", "0.57433337", "0.5733624", "0.5710894", "0.56908655", "0.56681085", "0.563653...
0.7761259
0
Delete a draft belonging to a particular user.
def do_delete_draft(draft_id: int, user_profile: UserProfile) -> None:
    """Delete one of the user's drafts and notify their clients.

    Raises ResourceNotFoundError if the draft does not exist or belongs
    to another user.
    """
    try:
        draft = Draft.objects.get(id=draft_id, user_profile=user_profile)
    except Draft.DoesNotExist:
        raise ResourceNotFoundError(_("Draft does not exist"))

    # Capture the id before deletion: after .delete() the row is gone.
    removed_draft_id = draft.id
    draft.delete()

    event = {"type": "drafts", "op": "remove", "draft_id": removed_draft_id}
    send_event(user_profile.realm, event, [user_profile.id])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_draft(draft_uuid):\n api_request('delete', api_url('drafts', str(draft_uuid)))", "def delete(self,\n draft_id,\n ):\n return self._invoke('delete',\n {\n 'draft_id': draft_id,\n })", ...
[ "0.7564674", "0.7383004", "0.6840054", "0.6774973", "0.67743796", "0.66971105", "0.6603117", "0.65684706", "0.654473", "0.6492794", "0.6456428", "0.63981634", "0.6362671", "0.63221097", "0.62822104", "0.62783", "0.62724733", "0.62385744", "0.61420053", "0.61398315", "0.612225...
0.77435535
0
Get zero version `0.0.0`
def zero(cls: Type[_R]) -> _R:
    """Return the zero version ``0.0.0``."""
    zero_version = "0.0.0"
    return cls(zero_version)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def version_number() -> int:\n return 0", "def get_version():\n return '%d.%d.%d' % version_info", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError...
[ "0.7245187", "0.668971", "0.66882396", "0.6665648", "0.6570469", "0.64901686", "0.64345765", "0.64268774", "0.63672644", "0.6360634", "0.63367814", "0.6328667", "0.6325577", "0.63126534", "0.6253386", "0.6247568", "0.62471354", "0.6241517", "0.6227065", "0.6226674", "0.621898...
0.68623227
1
Create a copy of a current version instance.
def copy(self: _R) -> _R:
    """Return a new instance of the same class equal to this version."""
    serialized = self.dumps()
    return self.__class__(serialized)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version_clone(self, version_id):\n raise Exception(\"TODO\")", "def __copy__(self):\n return self.__class__(self.baseurl, self.template, self.selection[:],\n self.slice[:], self.application)", "def make_reversion_to_copy(self, copy):\n if copy.copy_of != se...
[ "0.69766486", "0.68571734", "0.6853616", "0.6696837", "0.66747206", "0.6633099", "0.6621376", "0.6601393", "0.65898925", "0.65894204", "0.65894204", "0.6584453", "0.65257055", "0.6520473", "0.6510755", "0.6490696", "0.6472705", "0.6456239", "0.6437196", "0.6430004", "0.641598...
0.63095486
32
Get next release version.
def bump_release(
    self: _R,
    release_type: Literal["major", "minor", "micro"] = VersionParts.MICRO,
    inc: int = 1,
) -> _R:
    """Return the next release version.

    Dispatches to bump_major/bump_minor/bump_micro based on release_type;
    anything other than major or minor falls through to a micro bump, which
    matches the original if/elif/else behavior.
    """
    dispatch = {
        VersionParts.MAJOR: self.bump_major,
        VersionParts.MINOR: self.bump_minor,
    }
    bump = dispatch.get(release_type, self.bump_micro)
    return bump(inc)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patc...
[ "0.8136026", "0.8127919", "0.7517721", "0.71033996", "0.6974199", "0.6932763", "0.6864649", "0.6853223", "0.6828289", "0.6774772", "0.67444694", "0.6615308", "0.6605916", "0.6588041", "0.65674776", "0.65634", "0.65570605", "0.6548462", "0.6492869", "0.64794105", "0.6404592", ...
0.0
-1
Get next major version.
def bump_major(self: _R, inc: int = 1) -> _R:
    """Return the next major version, zeroing minor and micro.

    A pre-release of X.0.0 counts as the first step toward X.0.0, so
    stabilizing it consumes one increment.
    """
    if not self.is_stable and self.minor == 0 and self.micro == 0:
        # e.g. 2.0.0rc1 bumped by 1 becomes 2.0.0, not 3.0.0.
        return self.get_stable().bump_major(inc - 1)

    new_base = BaseVersion(
        epoch=0,
        release=(self.major + inc, 0, 0),
        pre=None,
        post=None,
        dev=None,
        local=None,
    )
    return self._replace(new_base)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patc...
[ "0.82029897", "0.7808802", "0.72779334", "0.71113545", "0.7102261", "0.702806", "0.69866174", "0.69342357", "0.6925223", "0.69188356", "0.6805731", "0.67799294", "0.6777864", "0.66487956", "0.6535175", "0.6533176", "0.6531343", "0.65151787", "0.6488693", "0.6478734", "0.64504...
0.63819283
24
Get next minor version.
def bump_minor(self: _R, inc: int = 1) -> _R:
    """Return the next minor version, zeroing micro.

    A pre-release of X.Y.0 counts as the first step toward X.Y.0, so
    stabilizing it consumes one increment.
    """
    if not self.is_stable and self.micro == 0:
        # e.g. 1.3.0b2 bumped by 1 becomes 1.3.0, not 1.4.0.
        return self.get_stable().bump_minor(inc - 1)

    new_base = BaseVersion(
        epoch=0,
        release=(self.major, self.minor + inc, 0),
        pre=None,
        post=None,
        dev=None,
        local=None,
    )
    return self._replace(new_base)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patc...
[ "0.8125392", "0.763029", "0.7602886", "0.7530592", "0.7165768", "0.6980305", "0.69532543", "0.69383895", "0.6928541", "0.6895772", "0.687624", "0.6815259", "0.67866236", "0.67418855", "0.67331123", "0.66247994", "0.65771586", "0.6505136", "0.64826417", "0.64826417", "0.647756...
0.6520823
17
Get next micro version.
def bump_micro(self: _R, inc: int = 1) -> _R:
    """Return the next micro (patch) version.

    Any pre-release counts as the first step toward its own stable
    version, so stabilizing it consumes one increment.
    """
    if not self.is_stable:
        # e.g. 1.2.3rc1 bumped by 1 becomes 1.2.3, not 1.2.4.
        return self.get_stable().bump_micro(inc - 1)

    new_base = BaseVersion(
        epoch=0,
        release=(self.major, self.minor, self.micro + inc),
        pre=None,
        post=None,
        dev=None,
        local=None,
    )
    return self._replace(new_base)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patc...
[ "0.6825203", "0.6565859", "0.6408749", "0.6236738", "0.6216092", "0.60519856", "0.60511297", "0.6045051", "0.5854707", "0.5835003", "0.58286273", "0.577659", "0.57726276", "0.5756602", "0.5756459", "0.5703252", "0.56749344", "0.5653698", "0.5628476", "0.5624355", "0.5614559",...
0.6671826
1
Get next prerelease version. If version is stable bump `micro` for a proper versioning as well. Defaults to `rc` prereleases.
def bump_prerelease(
    self: _R,
    inc: int = 1,
    release_type: Literal["rc", "alpha", "beta", "a", "b"] = None,
    bump_release: Literal["major", "minor", "micro"] = VersionParts.MICRO,
) -> _R:
    """Return the next pre-release version.

    If the version is already stable, bump ``bump_release`` (micro by
    default) first so the pre-release orders after the current version.
    Defaults to ``rc`` pre-releases when no type is given or inherited.
    """
    # Prefer an explicit type, then the current pre-release type, then rc.
    prerelease_type = release_type or self.prerelease_type or VersionParts.RC
    # Continue an existing pre-release counter, or start at inc.
    increment = inc if not self.base.pre else (max(self.base.pre[-1], 1) + inc)
    pre = (prerelease_type, increment)
    new_version = self._replace(self._copy_base(pre=pre))
    if new_version < self:
        # Tacking a pre-release onto the current release went backwards
        # (the version was stable or post-release): bump the release part
        # and start a fresh pre-release series on top of it.
        prerelease_type = release_type or VersionParts.RC
        new_version = self.get_stable().bump_release(bump_release)
        if prerelease_type != self.prerelease_type:
            # Changing series (e.g. rc -> alpha) resets the counter.
            increment = inc
    base = BaseVersion(
        epoch=0,
        release=new_version.base.release,
        pre=(prerelease_type, increment),
        post=None,
        dev=None,
        local=None,
    )
    return self._replace(base)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patc...
[ "0.74618196", "0.73501813", "0.72785693", "0.6688966", "0.6573726", "0.6516371", "0.63295984", "0.632609", "0.61778533", "0.60363823", "0.60113215", "0.60052055", "0.59608686", "0.59551674", "0.594888", "0.5904299", "0.5897116", "0.5894186", "0.5864545", "0.58246434", "0.5817...
0.6240163
8
Get next postrelease version.
def bump_postrelease(self: _R, inc: int = 1) -> _R:
    """Return the next post-release version.

    An existing post-release counter is continued; otherwise the counter
    starts at max(inc, 1).  Pre/dev/local tags are dropped.
    """
    existing_post: Optional[Tuple[str, int]] = self._version.post
    if existing_post:
        count = max(existing_post[1], 1) + inc
    else:
        count = max(inc, 1)

    new_base = BaseVersion(
        epoch=0,
        release=self._version.release,
        pre=None,
        post=(VersionParts.POST, count),
        dev=None,
        local=None,
    )
    return self._replace(new_base)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patc...
[ "0.74370474", "0.7424086", "0.7209765", "0.7072295", "0.68549937", "0.68467635", "0.6822434", "0.6782135", "0.675786", "0.6439284", "0.64025617", "0.6362353", "0.6344141", "0.63141763", "0.6309239", "0.6289336", "0.6222575", "0.610926", "0.6093002", "0.60867167", "0.6048308",...
0.68445814
6
Whether version is not prerelease or devrelease.
def is_stable(self) -> bool:
    """Whether this version is neither a pre-release nor a dev-release."""
    prerelease = self.is_prerelease
    return not prerelease
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_release():\n return VERSION[-1]", "def is_release(self):\n # version string: N.N.N.N is for release.\n return bool(re.match(r'^[\\d.]+$', self.version))", "def is_production(version=None):\n return is_host_google() and is_default_version(version)", "def is_0_release(release: str) -> ...
[ "0.7988306", "0.7390222", "0.72329897", "0.71225476", "0.70509773", "0.674931", "0.67251575", "0.66882837", "0.6623015", "0.6601849", "0.65394694", "0.64932364", "0.64372605", "0.639243", "0.63218457", "0.6310093", "0.62938845", "0.6283808", "0.6193949", "0.6171282", "0.60062...
0.7183544
3
Get stable version from pre or post release.
def get_stable(self: _R) -> _R:
    """Return the plain ``major.minor.micro`` version.

    Strips any pre-, post-, dev-release and local segments.
    """
    stable_base = BaseVersion(
        epoch=0,
        release=(self.major, self.minor, self.micro),
        pre=None,
        post=None,
        dev=None,
        local=None,
    )
    return self._replace(stable_base)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patc...
[ "0.65761906", "0.64690495", "0.64539593", "0.6419955", "0.6369132", "0.6336212", "0.62359613", "0.62115157", "0.61312795", "0.612242", "0.6118793", "0.61041874", "0.6091029", "0.6033105", "0.59679747", "0.59453046", "0.5935324", "0.5899036", "0.58957833", "0.58908105", "0.588...
0.6867934
0
v_1 w_1 + ... + v_n w_n
def dot(v, w):
    """Return the dot product v_1*w_1 + ... + v_n*w_n.

    Like zip(), silently truncates to the shorter of the two vectors.
    """
    total = 0
    for left, right in zip(v, w):
        total += left * right
    return total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v,w)]", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v, w)]", "def vector_add(v, w):\n\treturn [v_i + w_i for v_i, w_i in zip(v, w)]", "def add(v: Vector, w: Vector) -> Vector:\n assert len(v) == len(w), 'both vectors mu...
[ "0.70978653", "0.70766723", "0.70762354", "0.65042007", "0.6414303", "0.63884014", "0.63861686", "0.63567185", "0.63381374", "0.63309324", "0.6324586", "0.62767076", "0.6206558", "0.617885", "0.608573", "0.6083657", "0.6048553", "0.6001456", "0.5981003", "0.59767973", "0.5976...
0.6353202
12
makes variables for subsets of the unicode char sets
def assignCharacters(otherChars, where):
    """Append a character group to ``where`` (mutated in place).

    A list argument is appended unchanged.  A string argument is exploded
    into a list of its non-whitespace characters (joining with spaces and
    re-splitting drops any whitespace characters) before being appended.
    """
    if isinstance(otherChars, list):
        where.append(otherChars)
    if isinstance(otherChars, str):
        spaced = " ".join(otherChars)
        where.append(spaced.split())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _characters(self):\n self.characters = list(\n set([item for sublist in self.grid for item in sublist])\n )\n return self.characters", "def unique_characters(self):\n charset = set()\n for doc in self.docs.values():\n charset |= set(doc)\n\n ret...
[ "0.58729345", "0.5847239", "0.58128977", "0.56627953", "0.5647866", "0.56441444", "0.56391495", "0.5624327", "0.55717635", "0.55333704", "0.5514946", "0.5514946", "0.5495993", "0.5446798", "0.544425", "0.5440407", "0.54331005", "0.54176193", "0.5314018", "0.5313979", "0.53131...
0.53112906
21
checks for common characters in the password and returns the characters contained somewhere in the password string
def makeList(username, url, caseSensitive = False, wildCards = True):
    """Return the characters contained somewhere in the password.

    Probes each candidate character with checkPasswordCharacter and
    collects (and prints) the ones that match.  Groups are checked in the
    same order as before: lower, numbers, special, other, then upper when
    caseSensitive, then wildcards when wildCards.

    Fix: the original repeated the same probe loop six times; the loops
    are collapsed into one pass over the selected character groups, with
    identical probe order and output.
    """
    # Module-level character sets; assumed defined alongside this function
    # (lower, numbers, special, other, upper, wildcards) — TODO confirm.
    char_groups = [lower, numbers, special, other]
    if caseSensitive:
        char_groups.append(upper)
    if wildCards:
        char_groups.append(wildcards)

    charList = []
    for group in char_groups:
        for ch in group:
            ch = str(ch)
            # check for ch in the password at any position
            if checkPasswordCharacter(ch, username, url):
                charList.append(ch)
                print(ch)
    return charList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def analyze_password(password):\n vowels = number_of_vowels(password)\n if valid_password(password) is True:\n result = password + \" is a valid password and contains \" + str(vowels) + \" vowels.\"\n else:\n result = password + \" is not a valid password and contains \" + str(vowels) + \" v...
[ "0.66974944", "0.64420664", "0.6421295", "0.642044", "0.63667226", "0.6365107", "0.6325546", "0.63089836", "0.6299588", "0.62728053", "0.62592286", "0.62546295", "0.6249797", "0.62487674", "0.6225312", "0.6205716", "0.62037766", "0.62024164", "0.6148876", "0.61458707", "0.614...
0.0
-1
checks for a character in position i, and records it when the character at position i was found.
def checkPass(username, url, charList, n):
    """Recover a password of at most n characters, one position at a time.

    At each index i, first tests whether the password built so far is
    already complete (testPassword); otherwise probes index i with
    findChar and appends the result.  Returns the recovered password, or
    the partial string if n was too small.
    """
    # dikt = {}
    password = ""
    for i in range(0, n):
        if(testPassword(password, username, url)):
            return password #password is found!
        # https://stackoverflow.com/questions/189645/how-to-break-out-of-multiple-loops-in-python
        ch = findChar(username, url, charList, i)
        # if(isinstance(ch, int))#if ch is int i, can't find a matching character at index i in password string
        # use try except instead of if(isinstance(ch, int)):
        # https://stackoverflow.com/questions/3501382/checking-whether-a-variable-is-an-integer-or-not
        try:
            password += ch
        except TypeError:
            # Legacy path: older findChar returned the int index on failure.
            # print(i)
            password += str(ch) #should be blank
            # raise ValueError("index i has no matching character")
    return password #only reached if password is too long for the given n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inside_char(self, char, marker, tracker, i):\n if char == marker[0]:\n tracker.append(i)\n elif char == marker[1]:\n try:\n tracker.pop()\n except IndexError:\n pass\n return tracker", "def _charToIndex(self,ch): \n ...
[ "0.7345728", "0.63285416", "0.6295855", "0.6295855", "0.6295855", "0.6295855", "0.6248521", "0.6134753", "0.6081834", "0.6037675", "0.60087425", "0.60087425", "0.60087425", "0.5981202", "0.58713824", "0.5847541", "0.5844085", "0.5784419", "0.5778379", "0.57567716", "0.5748923...
0.0
-1
helper function for checkPass returns the first element of charList found that works for the password at index i if it fails to find a character at i, prints i and returns an empty string instead of returning i.
def findChar(username, url, charList, i):
    """Return the first element of charList matching the password at index i.

    Helper for checkPass.  If nothing matches (e.g. i is past the end of
    the password), logs the missing index and returns an empty string so
    the caller can keep concatenating.

    Fix: the original did print("Missing: " + i), which raises TypeError
    because i is an int; the index is now converted to str first.
    """
    for ch in charList:
        if checkPasswordCharacter(ch, username, url, index=i):
            return ch
    # Only reached when no character in charList matches at index i.
    print("Missing: " + str(i))  # so I know when it's not a match
    # Return "" rather than i (old behavior) or "_", since "_" would
    # falsely match as a wildcard when wildCards is enabled.
    return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkPass(username, url, charList, n):\n # dikt = {}\n password = \"\"\n for i in range(0, n):\n if(testPassword(password, username, url)):\n return password #password is found! \n # https://stackoverflow.com/questions/189645/how-to-break-out-of-multiple-loops-in-python\n ch...
[ "0.71826273", "0.6203832", "0.6179515", "0.6156986", "0.61567867", "0.60574234", "0.6030019", "0.59959686", "0.59700435", "0.5946914", "0.5921345", "0.5909927", "0.5908256", "0.58809274", "0.5878235", "0.5816463", "0.5804979", "0.5787845", "0.5780203", "0.57457215", "0.572765...
0.77611613
0
List of characters in table names
def makeTableList(url, caseSensitive = False, wildCards = True): charList = [] for ch in lower: # ch = str(ch) if(characterInTableName(ch, url)): charList.append(ch) for ch in numbers: ch = str(ch) if(characterInTableName(ch, url)): charList.append(ch) for ch in special: ch = str(ch) if(characterInTableName(ch, url)): charList.append(ch) for ch in other: ch = str(ch) if(characterInTableName(ch, url)): charList.append(ch) if(caseSensitive): for ch in upper: # ch = str(ch) if(characterInTableName(ch, url)): charList.append(ch, url) if(wildCards): for ch in wildCards: # ch = str(ch) if(characterInTableName(ch, url)): charList.append(ch, url) return charList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table_names(self, cursor=None):\r\n return [kind.key().name() for kind in Query(kind='__kind__').Run()]", "def makeTableNamesList(n, ):", "def get_table_column_name(self, table):\n c = self.conn.cursor()\n c.execute(\"SELECT * FROM %s\" % table)\n names = list(map(lambda x: x[0]...
[ "0.6977511", "0.68698466", "0.66727006", "0.6658325", "0.6603546", "0.65911764", "0.6573043", "0.6499986", "0.6483981", "0.6470874", "0.6378552", "0.6191215", "0.61647415", "0.614972", "0.61342114", "0.610629", "0.6102741", "0.6078974", "0.60369515", "0.5990207", "0.5947591",...
0.57981765
28
List of characters in database names
def makeDatabaseList(): charList = [] for ch in lower: # ch = str(ch) if(characterInDatabaseName(ch, url)): charList.append(ch) for ch in numbers: ch = str(ch) if(characterInDatabaseName(ch, url)): charList.append(ch) for ch in special: ch = str(ch) if(characterInDatabaseName(ch, url)): charList.append(ch) for ch in other: ch = str(ch) if(characterInDatabaseName(ch, url)): charList.append(ch) if(caseSensitive): for ch in upper: # ch = str(ch) if(characterInDatabaseName(ch, url)): charList.append(ch, url) if(wildCards): for ch in wildCards: # ch = str(ch) if(characterInDatabaseName(ch, url)): charList.append(ch, url) return charList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeDatabaseNamesList(n, ):", "def get_database_names(self) -> Iterable[str]:\n custom_database_name = self.service_connection.__dict__.get(\"databaseName\")\n\n database_name = self.service_connection.__dict__.get(\n \"database\", custom_database_name or \"default\"\n )\n ...
[ "0.6628727", "0.5976508", "0.5936507", "0.5901102", "0.58635503", "0.5853042", "0.5845213", "0.58389676", "0.58144695", "0.58012706", "0.5732453", "0.5721888", "0.5696421", "0.5669424", "0.56576335", "0.5632975", "0.56264454", "0.56206435", "0.5605173", "0.5598442", "0.557013...
0.7128734
0
List of table names
def makeTableNamesList(n, ):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTableNames(self):\n\n # The specific command depends on whether we are using mysql or sqlite\n if self.connector == 'mysql':\n sqlcmd = (\"SELECT table_name FROM INFORMATION_SCHEMA.TABLES \" +\n \"WHERE table_schema='\" + self.dbname + \"'\")\n else:\n ...
[ "0.8072151", "0.8006719", "0.8002535", "0.7987537", "0.78607845", "0.78607196", "0.78587925", "0.78372866", "0.7792003", "0.7758121", "0.7746107", "0.7651905", "0.76492196", "0.76076037", "0.7603518", "0.75557685", "0.7516995", "0.7435231", "0.74144286", "0.7379448", "0.73682...
0.698688
32
List of database names
def makeDatabaseNamesList(n, ):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_databases():\n config = load_config()\n\n databases = [x for x in config.keys() if \"schemas\" in config[x]]\n return databases", "def get_database_names(self) -> Iterable[str]:\n custom_database_name = self.service_connection.__dict__.get(\"databaseName\")\n\n database_name = sel...
[ "0.80087274", "0.78206784", "0.77191776", "0.7642941", "0.7639768", "0.7624062", "0.7575778", "0.75723255", "0.7553429", "0.75302035", "0.752453", "0.73743457", "0.7372475", "0.72860897", "0.7283433", "0.7283433", "0.72620213", "0.72427213", "0.72408473", "0.7234212", "0.7225...
0.6749918
29
makeList generalized to use the boolean function f.
def makeListF(f, url, *argsf, caseSensitive = False, wildCards = True):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_bool(bool_list):\n new_list = []\n for lst in bool_list:\n for item in lst:\n new_list.append(item)\n if True in new_list:\n return True\n else:\n return False", "def get_list_of_bool2(self):\n pass", "def build():\n return [5,2,1,3,6] # true\n ...
[ "0.63551515", "0.63446385", "0.6126535", "0.5955366", "0.5880952", "0.5871879", "0.58662516", "0.5822809", "0.57801163", "0.5777717", "0.5704812", "0.56913203", "0.55946165", "0.55707604", "0.55597055", "0.5552575", "0.55456346", "0.55446935", "0.5511639", "0.54347694", "0.54...
0.56456155
12
returns list of characters that appear in any username
def userNameCharacters(url, tableName, caseSensitive = False, wildCards = True): """ sqlzoo characters ['a', 'c', 'd', 'e', 'h', 'i', 'j', 'k', 'n', 'o', 'p', 'r', 't', 'w', '_', '%'] """ lst = [] for ch in special: if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")): lst.append(ch) for ch in lower: if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")): lst.append(ch) for ch in numbers: if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")): lst.append(ch) for ch in other: if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")): lst.append(ch) if(caseSensitive): for ch in upper: if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")): lst.append(ch) if(wildCards): for ch in wildcards: lst.append(ch) #it'll match if there's users return lst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_users():\n\treturn [unicode(name[:-4]).lower() for name in os.listdir(os.path.join(WORLD_DIR, 'players'))]", "def check_user_name(self, username):\n usernames = []\n for user in self.__users:\n if user['username'] == username:\n usernames.append(user)\n retu...
[ "0.65302527", "0.6494948", "0.63683933", "0.6244168", "0.6195335", "0.61639106", "0.61376554", "0.6136397", "0.6090331", "0.6048526", "0.6043042", "0.6035452", "0.60292345", "0.6010415", "0.59815353", "0.5924206", "0.5911422", "0.58786315", "0.5807217", "0.57925546", "0.57895...
0.68477184
0
construct sequences and use those to inform the choice of strings. So if a,b,c,d matches, check aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc, cd, da, db, dc, dd.
def checkUsernameSequences(n, ch, url, tableName, minLen = 1, maxLen = 2): if(minLen == 1): strLst = ch # assumes all of ch is a match else: strLst = [] for k in range(minLen, maxLen + 1): lst = generateSubSequences(k, ch) sublst = [x for x in lst if userNameLike(x, url, tableName)] # list comprehensions with conditions: # https://stackoverflow.com/questions/6475314/python-for-in-loop-preceded-by-a-variable strLst += sublst return strLst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coding_strand_to_AA(dna):\n Seq = ''\n for i in range(0,len(dna),3): \n triple = dna[i:i+3]\n print triple\n for k in range(len(codons)):\n if triple in codons[k]: \n print \"Casey Rocks\"\n print codons[k]\n amino = aa[k]\n ...
[ "0.58296806", "0.57344776", "0.57000065", "0.569295", "0.5626267", "0.5622596", "0.56082696", "0.56058365", "0.5604716", "0.55872947", "0.5555464", "0.5515502", "0.55089283", "0.54992986", "0.5488709", "0.5476587", "0.54706156", "0.54512507", "0.5450281", "0.5441451", "0.5432...
0.5192423
60
generates all subsequences of ch with length k
def generateSubSequences(k, ch): seq = ["".join(c) for c in itertools.product(ch, repeat = k)] # discussion about the best way to do this: # https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings return seq
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_kmers(seq, k):\n\n return [seq[i:i+k] for i in range(len(seq)-k+1)]", "def cut_kmer(sequence, k_mer):\n for i in range(0, len(sequence)-k_mer + 1):\n yield sequence[i:i+k_mer]", "def kmer_list(s, k):\n kmer = []\n n = len(s)\n # n-k+1 is the available range of values or probabliti...
[ "0.6393046", "0.63518596", "0.6346528", "0.63429147", "0.62951434", "0.62502956", "0.62439233", "0.61887056", "0.6169127", "0.6153225", "0.61380696", "0.6062542", "0.6060054", "0.6024283", "0.6011528", "0.6003349", "0.59933025", "0.5982021", "0.5982021", "0.59802747", "0.5954...
0.85543895
0
returns a list of usernames
def userNames(lst, url, tableName): n = len(lst) # https://docs.python.org/3/library/itertools.html#itertools.product # https://stackoverflow.com/questions/3034014/how-to-apply-itertools-product-to-elements-of-a-list-of-lists lst2 = list(itertools.product(*lst)) lst3 = list(map("".join, lst2)) # # Maybe use checkUsernameSequences here, # then add a check to reduce the amount of possibilities before building lst? # seq = checkUsernameSequences(n, lst, url, tableName, minLen = 2, maxLen = 2) # does not include the single characters since minLen > 1 lst4 = filt(seq, lst3) """# next time: find matching strings. That should (hopefully) reduce the space to search. REMEMBER, this filtering will miss all single character usernames!!! https://docs.python.org/3/library/re.html#regular-expression-syntax https://stackoverflow.com/questions/3640359/regular-expressions-search-in-list https://stackoverflow.com/questions/3040716/python-elegant-way-to-check-if-at-least-one-regex-in-list-matches-a-string https://stackoverflow.com/questions/19300020/python-match-a-string-with-regex https://stackoverflow.com/questions/37974047/if-any-strings-in-a-list-match-regex """ lst5 = [x for x in lst4 if checkUsername(x, url, tableName)] # lst = list(map(checkUsername, lst2)) return lst5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_usernames(self) -> list:\n db_list = list(self.cursor.execute('SELECT * FROM sqlite_master'))\n users = [db_list[i][1] for i in range(0, len(db_list), 2)]\n return users", "def user_list(self):\n self.cur.execute(\"SELECT username FROM users\")\n users = []\n for...
[ "0.8277952", "0.8237998", "0.821286", "0.8186258", "0.80364215", "0.79730445", "0.78307825", "0.7717235", "0.7714539", "0.7674189", "0.7632501", "0.7603505", "0.75877964", "0.75004154", "0.7474869", "0.74285185", "0.7409626", "0.7409626", "0.73354083", "0.7305028", "0.7255649...
0.0
-1
filters lst. returns sublist
def filt(seq, lst): regex = "(" + ")|(".join(seq) + ")" regex = re.compile(regex) slst = list(filter(regex.search, lst)) return slst # still need a checkUsername function
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sublist(self, lst, exclude, length):\n for sublist in itertools.combinations([e for e in lst if e not in exclude], length):\n yield list(sublist)", "def my_filter(function,lst):\n return list(x for x in lst if function(x))", "def filter(lst):\n res = []\n for name in lst:\n ...
[ "0.674777", "0.67473686", "0.6627806", "0.63558894", "0.63215184", "0.6267986", "0.6242654", "0.6209429", "0.6174839", "0.60629857", "0.6052656", "0.5999534", "0.596492", "0.59513366", "0.59276193", "0.59161556", "0.58984244", "0.58968115", "0.5890496", "0.5857357", "0.581732...
0.54409194
42
Readd a user's postgresql database.
def _add_postgresql(user, options, dump = None): if dump is not None: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postgres_rebuild(name=None, user=None):\n name = check(name, 'name: The dabatase name to create.')\n user = check(user, 'user: the user to grant privileges.')\n\n drop_postgres_db(name=name, user=user)\n create_postgres_db(name=name, user=user, password=password)", "def recreate_db():\n drop_d...
[ "0.7393419", "0.6982639", "0.6812037", "0.642954", "0.6232582", "0.62139505", "0.6179955", "0.5981221", "0.5960473", "0.5953316", "0.5928391", "0.59170234", "0.58603674", "0.58522147", "0.5849216", "0.5843206", "0.5840966", "0.5813732", "0.5810215", "0.58094305", "0.580628", ...
0.60755813
7
Adds a user's mysql tables back into the OCF database.
def _add_mysql(user, options, dump = None): # Access the new username with user["username"] pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_db():\n populate_tables()", "def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)...
[ "0.6519952", "0.62828916", "0.61585516", "0.6156607", "0.61473525", "0.6075949", "0.5991981", "0.5991254", "0.5913686", "0.5912704", "0.5908939", "0.5908939", "0.5899569", "0.5880359", "0.5877914", "0.5871357", "0.5836821", "0.58103013", "0.57808405", "0.5778543", "0.57273936...
0.6568307
0
Class for handling all minidump symbolizing code on Android.
def __init__(self, dump_finder, build_dir, symbols_dir=None): # Map from minidump path (string) to minidump_dump output (string). self._minidump_dump_output = {} # Map from minidump path (string) to the directory that should be used when # looking for symbol binaries (string). self._minidump_symbol_binaries_directories = {} # We use the OS/arch of the host, not the device. super(AndroidMinidumpSymbolizer, self).__init__( platform.system().lower(), platform.machine(), dump_finder, build_dir, symbols_dir=symbols_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def obfuscate():\r\n smali_file_list = u.load_smali_file() # Load smali files\r\n set()\r\n change_all_direct_method(\r\n set(\r\n find_all_direct_method(\r\n list(u.get_android_method_names()) + list(set(find_all_native_method(smali_file_list))),\r\n smali...
[ "0.5797563", "0.53851175", "0.5382028", "0.5153331", "0.4875175", "0.48596218", "0.47921395", "0.4589166", "0.45467687", "0.45267266", "0.45106924", "0.44272697", "0.441203", "0.43683136", "0.43274632", "0.43264234", "0.43178105", "0.43031862", "0.43031862", "0.42946658", "0....
0.6551352
0
Returns a list of paths to binaries where symbols may be located.
def GetSymbolBinaries(self, minidump): libraries = self._ExtractLibraryNamesFromDump(minidump) symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries) if not symbol_binary_dir: return [] return [os.path.join(symbol_binary_dir, lib) for lib in libraries]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_binaries():\n\n builddir = Path(__file__).parent.parent / \"builddir\"\n\n bins = []\n\n for folder in [\"examples\", \"tests\", \"tools\"]:\n for path in sorted((builddir / folder).rglob(\"*\")):\n if path.stem.startswith(\"xnvme_single\"):\n continue\n ...
[ "0.7554683", "0.65706384", "0.6247244", "0.6195482", "0.61567324", "0.61457145", "0.6059669", "0.60321474", "0.60022146", "0.60022146", "0.59460664", "0.5926765", "0.5923688", "0.5919705", "0.58894503", "0.5882842", "0.5870094", "0.5855827", "0.58501244", "0.5832909", "0.5810...
0.7863217
0
Extracts library names that may contain symbols from the minidump. This is a duplicate of the logic in Chromium's //build/android/stacktrace/crashpad_stackwalker.py.
def _ExtractLibraryNamesFromDump(self, minidump): default_library_name = 'libmonochrome.so' minidump_dump_output = self._GetMinidumpDumpOutput(minidump) if not minidump_dump_output: logging.warning( 'Could not get minidump_dump output, defaulting to library %s', default_library_name) return [default_library_name] library_names = [] module_library_line_re = re.compile(r'[(]code_file[)]\s+= ' r'"(?P<library_name>lib[^. ]+.so)"') in_module = False for line in minidump_dump_output.splitlines(): line = line.lstrip().rstrip('\n') if line == 'MDRawModule': in_module = True continue if line == '': in_module = False continue if in_module: m = module_library_line_re.match(line) if m: library_names.append(m.group('library_name')) if not library_names: logging.warning( 'Could not find any library name in the dump, ' 'default to: %s', default_library_name) return [default_library_name] return library_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetSymbolBinaries(self, minidump):\n libraries = self._ExtractLibraryNamesFromDump(minidump)\n symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)\n if not symbol_binary_dir:\n return []\n\n return [os.path.join(symbol_binary_dir, lib) for lib in libraries]", "def get_pla...
[ "0.606551", "0.5674643", "0.56701034", "0.5526038", "0.5405917", "0.5370439", "0.5335965", "0.52976125", "0.52932", "0.52613264", "0.523408", "0.52253866", "0.5208155", "0.51849467", "0.51835585", "0.5180916", "0.51393825", "0.51383364", "0.513191", "0.5112789", "0.5104473", ...
0.74637324
0
Gets the directory that should contain symbol binaries for |minidump|.
def _GetSymbolBinaryDirectory(self, minidump, libraries): if minidump in self._minidump_symbol_binaries_directories: return self._minidump_symbol_binaries_directories[minidump] # Get the processor architecture reported by the minidump. arch = None matcher = re.compile(_PROCESSOR_ARCH_REGEX) for line in self._GetMinidumpDumpOutput(minidump).splitlines(): match = matcher.match(line) if match: arch = match.groupdict()['arch'].lower() break if not arch: logging.error('Unable to find processor architecture for minidump %s', minidump) self._minidump_symbol_binaries_directories[minidump] = None return None if arch not in _BREAKPAD_ARCH_TO_FILE_REGEX: logging.error( 'Unsupported processor architecture %s for minidump %s. This is ' 'likely fixable by adding the correct mapping for the architecture ' 'in android_minidump_symbolizer._BREAKPAD_ARCH_TO_FILE_REGEX.', arch, minidump) self._minidump_symbol_binaries_directories[minidump] = None return None # Look for a directory that contains binaries with the correct architecture. matcher = re.compile(_BREAKPAD_ARCH_TO_FILE_REGEX[arch]) symbol_dir = None for symbol_subdir in _POSSIBLE_SYMBOL_BINARY_DIRECTORIES: possible_symbol_dir = os.path.join(self._build_dir, symbol_subdir) if not os.path.exists(possible_symbol_dir): continue for f in os.listdir(possible_symbol_dir): if f not in libraries: continue binary_path = os.path.join(possible_symbol_dir, f) stdout = subprocess.check_output( ['file', binary_path], stderr=subprocess.STDOUT) if matcher.match(stdout): symbol_dir = possible_symbol_dir break if not symbol_dir: logging.error( 'Unable to find suitable symbol binary directory for architecture %s.' 'This is likely fixable by adding the correct directory to ' 'android_minidump_symbolizer._POSSIBLE_SYMBOL_BINARY_DIRECTORIES.', arch) self._minidump_symbol_binaries_directories[minidump] = symbol_dir return symbol_dir
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bin_dir():\n return os.path.abspath(os.path.join(get_root_dir(), 'bin/'))", "def GetSymbolBinaries(self, minidump):\n libraries = self._ExtractLibraryNamesFromDump(minidump)\n symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)\n if not symbol_binary_dir:\n return []\...
[ "0.6687989", "0.668011", "0.6666962", "0.6321019", "0.63035196", "0.6107489", "0.6062449", "0.6049191", "0.6031628", "0.6012374", "0.5988652", "0.5959031", "0.5932072", "0.5915141", "0.59118515", "0.59096825", "0.5886469", "0.5836009", "0.58122456", "0.58107346", "0.58003944"...
0.7879615
0
Runs minidump_dump on the given minidump. Caches the result for reuse.
def _GetMinidumpDumpOutput(self, minidump): if minidump in self._minidump_dump_output: logging.debug('Returning cached minidump_dump output for %s', minidump) return self._minidump_dump_output[minidump] dumper_path = local_first_binary_manager.GetInstance().FetchPath( 'minidump_dump') if not os.access(dumper_path, os.X_OK): logging.warning('Cannot run minidump_dump because %s is not found.', dumper_path) return None # Using subprocess.check_output with stdout/stderr mixed can result in # errors due to log messages showing up in the minidump_dump output. So, # use Popen and combine into a single string afterwards. p = subprocess.Popen( [dumper_path, minidump], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() stdout = stdout + '\n' + stderr if p.returncode != 0: # Dumper errors often do not affect stack walkability, just a warning. # It's possible for the same stack to be symbolized multiple times, so # add a timestamp suffix to prevent artifact collisions. now = datetime.datetime.now() suffix = now.strftime('%Y-%m-%d-%H-%M-%S') artifact_name = 'dumper_errors/%s-%s' % ( os.path.basename(minidump), suffix) logging.warning( 'Reading minidump failed, but likely not actually an issue. Saving ' 'output to artifact %s', artifact_name) artifact_logger.CreateArtifact(artifact_name, stdout) if stdout: self._minidump_dump_output[minidump] = stdout return stdout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testPullMinidumps(self):\n def GetDumpLocation(_=None):\n return '/sdcard/dumps/'\n\n platform_backend = self._browser_backend.platform_backend\n time_offset = platform_backend.GetDeviceHostClockOffset()\n platform_backend.GetDumpLocation = GetDumpLocation\n remote_path = posixpath.join(Get...
[ "0.57792044", "0.53705996", "0.52849925", "0.52743363", "0.5234966", "0.518306", "0.51442665", "0.5070806", "0.5030404", "0.49894577", "0.49146363", "0.49089596", "0.48556525", "0.4828357", "0.48120502", "0.47813582", "0.47782636", "0.4737786", "0.46923456", "0.46380943", "0....
0.6442122
0
Make sure message is formatted correctly.
def test_setting_failure(self): with mock.patch.object(ip_lib, 'set_ip_nonlocal_bind', return_value=1): ip_lib.set_ip_nonlocal_bind_for_namespace('foo', value=1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format(self, message):", "def _formatMessage(self, msg, standardMsg):\r\n if not self.longMessage:\r\n return msg or standardMsg\r\n if msg is None:\r\n return standardMsg\r\n try:\r\n return '%s : %s' % (standardMsg, msg)\r\n except UnicodeDecodeE...
[ "0.7444605", "0.69293326", "0.690031", "0.67062855", "0.66425115", "0.64710456", "0.64351165", "0.62836003", "0.62724143", "0.61536384", "0.6117061", "0.6076582", "0.6061003", "0.6058752", "0.6057646", "0.60355955", "0.6029556", "0.60024786", "0.599572", "0.59727937", "0.5924...
0.0
-1
Calculate the reserve factor for Skin Buckling by calculating the critical load and comparing to the applied load. Return the reserve factor adjusted for inequalities based around ==0
def skinBuckle(dim): bst = dim[0] tst = dim[1] tsk = dim[2] epsilonk = kk*((tsk/bsk))**2 Et = (Esk*tsk)+(Est*((bst*tst)/bsk)) Nsk = Et*epsilonk # Critical Load rsf = Nsk/Nx return rsf - 1 # Using a target Reserve Factor of 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def internal_external_canopy_heat_capacity(lumped_cover_heat_capacity: float) -> float:\n return 0.1 * lumped_cover_heat_capacity", "def calculate_effective_capacitance(self, load):\n c_load = loa...
[ "0.6202992", "0.59245586", "0.57835984", "0.57430667", "0.56664133", "0.56359935", "0.55884063", "0.55883443", "0.5531915", "0.54831076", "0.5470007", "0.54357696", "0.5415667", "0.5415524", "0.5412604", "0.5383835", "0.53750026", "0.5356168", "0.53061926", "0.52848005", "0.5...
0.5172448
27
Calculate the reserve factor for Stiffener Buckling by calculating the critical load and comparing to the applied load. Return the reserve factor adjusted for inequalities based around ==0
def stiffenerBuckle(dim): bst = dim[0] tst = dim[1] tsk = dim[2] epsilont = kt * ((tst / bst)) ** 2 Et = (Esk * tsk) + (Est * ((bst * tst) / bsk)) Nst = Et*epsilont # Critical Load rsf = Nst/Nx return rsf - 1 # Using a target Reserve Factor of 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_lower_boundary(self, divisor):\n\n # see how low you can go\n quotas = [0] * self.states\n fair_shares = [0] * self.states\n counter = 0\n lowest_divisor = 0\n prev_divisor = 0\n estimator = 1000000000\n while counter < 1000:\n for i,...
[ "0.6212213", "0.5960155", "0.59591246", "0.58988106", "0.57863295", "0.5742012", "0.56890213", "0.5643248", "0.5618332", "0.56007034", "0.55502635", "0.55463886", "0.5537825", "0.551547", "0.5508426", "0.54879606", "0.5471865", "0.54627365", "0.5456973", "0.545685", "0.544993...
0.49897957
86
Calculate the reserve factor for Material Failure by calculating the critical load and comparing to the applied load. Return the reserve factor adjusted for inequalities based around =>0
def matFail(dim): bst = dim[0] tst = dim[1] tsk = dim[2] Et = (Esk * tsk) + (Est * ((bst * tst) / bsk)) Nmat = Et*maxstrain # Critical Load rsf = Nmat/Nx return rsf - 1.1 # Using a target Reserve Factor of >=1.1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def fuel_required(mass):\n return max(math.floor(mass/3) - 2, 0)", "def getRepairCapacity(self):\n return int(self.myDesign.getSYCRequired() * (1-(self.strength/100.0)))", "def internal_external...
[ "0.5882945", "0.5823634", "0.57726854", "0.57467616", "0.5741599", "0.57414633", "0.5710871", "0.55506474", "0.54651403", "0.5430573", "0.5428441", "0.54128045", "0.53944397", "0.5361302", "0.535642", "0.53394634", "0.5322019", "0.53197694", "0.5309506", "0.5301232", "0.53002...
0.517416
33
Calculate the reserve factor for Euler Buckling Failure by calculating the critical load and comparing to the applied load. Return the reserve factor adjusted for inequalities based around =>0
def eulerBuckle(dim): bst = dim[0] tst = dim[1] tsk = dim[2] ZEAZ = (Est*bst*tst*((tsk/2)+(bst/2))) ZEA = (Est*bst*tst)+(Esk*tsk*bsk) zbar = ZEAZ/ZEA # Neutral Axis EIbar = ((Esk*bsk*(tsk**3))/12)+(Esk*bsk*tsk*(zbar**2))+((Est*tst*bst**3)/12)+\ (Est*bst*tst*(((bst/2)+(tsk/2)-zbar)**2)) # Using Parallel Axis Theorm NxEuler = ((math.pi**2)*EIbar)/(ribSpace**2*bsk) # Critical Load rsf = NxEuler/Nx return rsf - 1.1 # Using a target Reserve Factor of >=1.1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def internal_external_canopy_heat_capacity(lumped_cover_heat_capacity: float) -> float:\n return 0.1 * lumped_cover_heat_capacity", "def calculate_lower_boundary(self, divisor):\n\n # see how low ...
[ "0.5978814", "0.58166504", "0.57432806", "0.5645041", "0.56362903", "0.5612008", "0.56099224", "0.55491924", "0.55470383", "0.55240196", "0.55133814", "0.5455415", "0.5440525", "0.5410518", "0.54084057", "0.54057515", "0.53936213", "0.5387816", "0.53826976", "0.5381602", "0.5...
0.555809
7
This function allows for testing of a set of dimensions, printing key info as a result.
def tester(dim): print("Unit Mass: " + str(mass(dim))) print("Skin Buckle RSF: " + str(skinBuckle(dim)+1)) print("Stiffener Buckle RSF: " + str(stiffenerBuckle(dim)+1)) print("Mat Fail RSF: " + str(matFail(dim)+1.1)) print("Euler Fail RSF: " + str(eulerBuckle(dim)+1.1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_dict_dimensions(entities_db, title='Title', message=''):\n total = 0\n m = f'{title}\\n'\n for k1, v1 in entities_db.items():\n m += f'\\n{message}{k1}:\\n'\n if isinstance(v1, dict):\n for k2, v2 in v1.items():\n if isinstance(v2, tuple):\n ...
[ "0.6196142", "0.6052784", "0.60204434", "0.60203207", "0.60203207", "0.5951763", "0.5832673", "0.5800634", "0.57597065", "0.57452214", "0.5724362", "0.57028264", "0.5698949", "0.5673566", "0.55920947", "0.55707216", "0.55707216", "0.55645055", "0.55363876", "0.55324614", "0.5...
0.5100301
95
The callback function for the optimize process. Manages recording of iteration values and monitoring current optimiser solutions.
def callbackMonitor(dim): bstRecord.append(dim[0]) tstRecord.append(dim[1]) tskRecord.append(dim[2]) massRecord.append(mass(dim)) rsfSkinRecord.append(skinBuckle(dim)+1) rsfStiffRecord.append(stiffenerBuckle(dim)+1) rsfMatRecord.append(matFail(dim)+1.1) rsfEulerRecord.append(eulerBuckle(dim)+1.1) print(dim, mass(dim))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Optimize(self):\n self._OpenOutputFiles()\n while self.n_iter < self.n_maxiter and not self.is_converged:\n self.n_iter += 1\n self._ChooseStepDirection(self.opt_type)\n self._LineSearch(-1.0 * self.step_dir)\n self._UpdateEnergy()\n self._UpdateGradient()\n self.traj.Append...
[ "0.65901256", "0.6476958", "0.64748776", "0.639814", "0.63832265", "0.6313094", "0.6264372", "0.6213871", "0.6205301", "0.61710924", "0.6140338", "0.6129752", "0.60806435", "0.6051773", "0.6009665", "0.60076827", "0.60076827", "0.5998235", "0.59467274", "0.5933865", "0.590929...
0.0
-1
About Nurevam or person by mention info
async def info(self,ctx,*,person:discord.Member = None): if not person: guild = len(self.bot.guilds) member = len(set(self.bot.get_all_members())) app = await self.bot.application_info() msg = "Name:{}".format(self.bot.user) if ctx.message.guild.me.nick: msg += "\nNickname:{}".format(ctx.message.guild.me.nick) msg += "\nCreator: {}".format(app.owner) msg += "\nServer:{}\nMembers:{}".format(guild,member) link = "If you want to invite this bot to your server, you can check it out here <http://nurevam.site>!" return await self.bot.say(ctx,content = "```xl\n{}\n```\n{}\n".format(msg,link)) else: e = discord.Embed() e.title = "{} - {}".format(person,person.id) e.set_thumbnail(url = person.avatar_url) e.add_field(name = "Created at", value="{} - ({})".format(person.created_at,self.get_time_delta(person.created_at)),inline=False) e.add_field(name = "Joined at", value="{} - ({})".format(person.joined_at,self.get_time_delta(person.joined_at)),inline=False) e.add_field(name = "Total Roles", value=str(len(person.roles)),inline=False) if person.colour.value: e.colour = person.color await self.bot.say(ctx,embed = e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mention(self) -> str:", "def mention(self) -> str:", "def display_person(person):\n name = person['name']\n followers = person['follower_count']\n description = person['description']\n country = person['country']\n print(f'{name}, a(n) {description}, from {country}.')\n return followers",...
[ "0.6522878", "0.6522878", "0.5954495", "0.5830652", "0.58269155", "0.5811767", "0.5793768", "0.5775662", "0.5751016", "0.5696697", "0.5679133", "0.55970377", "0.55823743", "0.556282", "0.5555041", "0.55549055", "0.5526657", "0.5525049", "0.55118614", "0.5511843", "0.5495225",...
0.5873194
3
Give info about this server
async def serverinfo(self,ctx): g = ctx.guild embed = discord.Embed() embed.set_thumbnail(url = g.icon_url) embed.title = "{} - {}".format(g.name,g.id) embed.add_field(name = "Owner",value="{} - {}".format(g.owner,g.owner.id),inline=False) embed.add_field(name = "Created at", value = str(g.created_at), inline=False) embed.add_field(name = "Total Roles", value= str(len(g.roles)), inline=False) embed.add_field(name = "Total Members", value= str(g.member_count), inline=False) embed.add_field(name = "Premium Member", value= str(g.premium_subscription_count), inline=False) embed.add_field(name = "Premium Tier", value= str(g.premium_tier), inline=False) await self.bot.say(ctx,embed = embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_serverinfo(self, server):\n print('QManager server:', self._server)\n server_info = self._qm.get_server_info()\n for k, v in server_info.items():\n print(' %s: %s' % (k, v))", "def server_info(ctx):\n data = ctx.obj.get_server_info()\n output_json_data(data)", "def...
[ "0.8096221", "0.80244446", "0.7995032", "0.7823503", "0.7384904", "0.7376968", "0.72559196", "0.7244653", "0.7027462", "0.70059526", "0.6965059", "0.69347584", "0.6865889", "0.68443227", "0.6838706", "0.6834391", "0.68235415", "0.68076944", "0.67965454", "0.67949927", "0.6736...
0.71475893
8
Type !help {command} for more info on a command. You can also type !help {category} for more info on a category. For example, !help level (If you have level plugin enable!)
async def command(self,ctx): await ctx.send("Yes this is a command.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def help_help(self):\n print(\"List commands or print details about a command\")", "def command_help(self, command):\n self.commands[command].command_help()", "def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not n...
[ "0.8511808", "0.8454938", "0.83023393", "0.82100123", "0.8172162", "0.8119534", "0.8045609", "0.7997151", "0.79934657", "0.79888207", "0.7869247", "0.7839074", "0.7831564", "0.7807893", "0.7801458", "0.77975965", "0.77936643", "0.7725134", "0.76936215", "0.7667932", "0.766793...
0.0
-1
Type !help command for additional info on a command. You can also type !help category for additional info on a category. For example, type !help Level (If you have the level plugin enable!)
async def category(self,ctx): await ctx.send("Yes this is a category.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def help_help(self):\n print(\"List commands or print details about a command\")", "def command_help(self, command):\n self.commands[command].command_help()", "def _help(self):\n self.onecmd('help')", "def cmd_help(args):", "def help():\n \n pass", "def help():", "def com...
[ "0.84459895", "0.81877965", "0.81058455", "0.8060183", "0.80382925", "0.803564", "0.7979417", "0.79499114", "0.79247713", "0.79205954", "0.7903753", "0.7890884", "0.7862196", "0.78568035", "0.7844785", "0.78190106", "0.78190106", "0.77970946", "0.77970946", "0.7784749", "0.77...
0.0
-1
Red = Disable Blue = Enable Any problem such as plugins on dashboard is enable but show disable here, info Owner
async def plugin(self,ctx): special_case = {"Anime":"myanimelist","Anti Raid":"antiraid"} plugin_setting = await self.redis.hgetall("{}:Config:Cogs".format(ctx.message.guild.id)) embed = discord.Embed() cogs = self.bot.cogs.keys() for x in cogs: setting = u"\U0001F534" #red if x in ("Core", "Remindme", "Tools", "REPL","Events"): # A Owner's thing only. if ctx.message.author.id != self.bot.owner.id: continue setting = u"\U0001F535" #blue if x.lower() in plugin_setting or special_case.get(x) in plugin_setting: setting = u"\U0001F535" #blue embed.add_field(name = x,value = setting) if ctx.message.guild.me.colour.value: embed.colour = ctx.message.guild.me.colour embed.set_footer(text = "{} = Disable | {} = Enable".format(u"\U0001F534",u"\U0001F535")) await ctx.send(embed=embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_enabled(self):", "def check_disabled(self):\n return None", "def get_status(self):\n return super(Cabling, self).get_status()", "def enable(self):", "def Enabled(self) -> bool:", "def disable(self):", "def isEnabled(self):", "def disable_feature(self,reason,source=\"gff3_maniger\...
[ "0.64461994", "0.61870736", "0.6155342", "0.606294", "0.60158396", "0.59407747", "0.5909308", "0.5869749", "0.5851598", "0.58354104", "0.5772434", "0.5748269", "0.574007", "0.5675615", "0.5668143", "0.5668143", "0.5668143", "0.5668143", "0.5668143", "0.5668143", "0.5668143", ...
0.65774465
0
store input into filename used pickle.dump
def store (input, filename) : cout = open (filename, 'w') pickle.dump (input, cout) cout.close ()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store(self, filename):", "def pickle(self,data,filename):\n pickle.dump(data, open(filename, 'wb'))", "def save_file_data(name, obj, input_path='/inputs'):\n filename = '{}/{}.pkl'.format(input_path, name)\n if not os.path.exists(os.path.dirname(filename)):\n try:\n os.makedi...
[ "0.7272676", "0.7110179", "0.69229895", "0.67838925", "0.6766273", "0.67407393", "0.67352134", "0.6724542", "0.6718559", "0.6689364", "0.6684626", "0.6666956", "0.6617081", "0.6609867", "0.65701944", "0.65609634", "0.6513856", "0.6507621", "0.6492011", "0.64900124", "0.648344...
0.89019084
0
load data from filename used pickle.load
def grab (filename) : cin = open (filename, 'r') return pickle.load (cin)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(filename):\n with open(filename,'rb') as f:\n return pickle.load(self,f)", "def load_data(file_name):\n with open(file_name, 'rb') as f:\n data = pickle.load(f)\n return data", "def load_pickle_data(filename):\n path = \"../tmp/{}.pckl\".format(filename)\n if os.pa...
[ "0.7869546", "0.7829951", "0.7812556", "0.77086544", "0.76826143", "0.7677001", "0.76607376", "0.7654821", "0.7640084", "0.7521244", "0.75156707", "0.7463171", "0.7457279", "0.743854", "0.7430627", "0.7389005", "0.7344408", "0.73272413", "0.7275342", "0.72668594", "0.7232986"...
0.68825763
58
Sends args and kwargs to any configured callbacks. This handles the cases where the 'callbacks' variable is ``None``, a single function, or a list.
def _multiple_callbacks(callbacks, *args, **kwargs): if isinstance(callbacks, list): for cb in callbacks: cb(*args, **kwargs) return if callbacks: callbacks(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_callbacks(self, **kwargs):\n for callback in self.CALLBACKS:\n getattr(self, callback)(**kwargs)", "def callbacks(*args, addCallback: Script=None, clearAllCallbacks: bool=True, clearCallbacks:\n bool=True, describeHooks: bool=True, dumpCallbacks: bool=True, executeCallbacks...
[ "0.702277", "0.66514504", "0.65428776", "0.651266", "0.6477689", "0.61787194", "0.6089447", "0.60846204", "0.60464215", "0.60348064", "0.59095436", "0.590122", "0.5900241", "0.5882658", "0.5858614", "0.5747529", "0.57051486", "0.5680165", "0.56781036", "0.5653725", "0.5636546...
0.7574884
0
Adds and connects attributes from default encore FKIK switch anim setup to rig nodes in scene Imports default control setup from file or you may specify source_ctrl in args to override
def make_fkikSwitch_connection_attrs(partpre=None, side='Lt', source_ctrl=None, tag_name='switch', snapTo=None, add_attrs=None): switch_anim = '' if source_ctrl is not None: switch_anim = source_ctrl partpre = partpre if partpre == '': partpre = 'mypart_' if source_ctrl is None: # filepath = r'C:/Users/Nicob/Documents/maya/scripts/rigBot/rigBot/config/switcher_anim.mb' system_base_path = os.path.dirname(utils.__file__) base_path = os.path.join(system_base_path, 'config') file_path = os.path.join(base_path, 'switcher_anim.mb') newnodes = mc.file(filepath, i=1, ignoreVersion=1, rnn=1, mergeNamespacesOnClash=0, rpr=partpre, ra=1, options="v=0;", pr=1) switch_anim = partpre + '_CTL' # pos switcher grpOffset node if snapTo if snapTo is not None: utils.snap_to_transform(snapTo, switch_anim.replace('CTL', 'grpOffset')) mc.setAttr(switch_anim.replace('CTL', 'grpOffset') + '.r', 0, 0, 0) # get value of tags and sort into ik and fk vis groups iks = [] fks = [] nodes = mc.ls('*.' + tag_name) for node in nodes: if partpre in node and side in node: mode = mc.getAttr(node) if mode: mode = mode.lower() if 'ik' in mode: iks.append(node.split('.')[0]) if 'fk' in mode: fks.append(node.split('.')[0]) for ik in iks: # ikparpar=utils.get_parent(ik) ikpar = utils.get_parent(ik) if ikpar is None: mc.connectAttr(switch_anim + '.FK_IK', ik + '.visiblity', f=1) else: mc.connectAttr(switch_anim + '.FK_IK', ikpar + '.visibility', f=1) rvn = mc.createNode('reverse', name=switch_anim + '_fkik_vis_rv') mc.connectAttr(switch_anim + '.FK_IK', rvn + '.inputX') for fk in fks: fkpar = utils.get_parent(fk) if fkpar: mc.connectAttr(rvn + '.outputX', fkpar + '.visibility', f=1) if add_attrs is not None: for att in add_attrs: mc.addAttr(switch_anim, ln=att, min=0, max=1, dv=0, k=1) nns = [] for nn in reversed(newnodes): nnn = '' sn = nn.split("|") nnn = mc.rename(nn, sn[-1]) nns.append(nnn) anim = mc.ls(partpre + '_CTL') # if mc.objExists (partpre+'_skeleton_grp'): # mc.parent (anim, 
partpre+'_skeleton_grp' ) return anim
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def switch_setup(params, rig, ik_joints):\n\n # Duplicate for bind skeleton\n skeleton = [x.name() for x in params['ikSkeleton']]\n bind_skeleton = cmds.duplicate(skeleton, n=skeleton[0] + '_bnd_0')\n #bind_skeleton\n\n # Hide all attribute on Controller\n fkikcontrol = params['fkIkSwitch'].name(...
[ "0.5719306", "0.5415949", "0.5388942", "0.5385029", "0.5336632", "0.5299819", "0.5200528", "0.5145229", "0.5140653", "0.51194525", "0.5079638", "0.50723904", "0.50585204", "0.50376135", "0.5010488", "0.49763772", "0.49707508", "0.49594924", "0.4954051", "0.4949411", "0.494735...
0.5789509
0
This will be used for FK IK matching
def create_snapto_node(ctrl, jnt): snapto = ctrl+'_SNAPTO' if mc.objExists(snapto): jnt = utils.get_parent(snapto) mc.delete(snapto) snapto_grp = mc.duplicate(ctrl, po=1, n=ctrl+'_SNAPTO_GRP')[0] snapto = mc.duplicate(ctrl, po=1, n=ctrl+'_SNAPTO')[0] utils.set_attrs([snapto_grp, snapto], k=1, l=0) mc.parent(snapto_grp, jnt) mc.parent(snapto, snapto_grp) ua = mc.listAttr(snapto, ud=1) for a in ua: try: mc.setAttr(snapto+'.'+a, l=0) mc.deleteAttr(snapto+'.'+a) mc.setAttr(snapto_grp+'.'+a, l=0) mc.deleteAttr(snapto_grp+'.'+a) except: pass return snapto
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ikFkMatch(\n namespace,\n ikfk_attr,\n ui_host,\n fks,\n ik,\n upv,\n ik_rot=None,\n key=None):\n\n # returns a pymel node on the given name\n def _get_node(name):\n # type: (Text) -> pm.nodetypes.Transform\n name = anim_utils.stripNam...
[ "0.64621747", "0.6233758", "0.6001383", "0.5866673", "0.5858059", "0.57838565", "0.5618625", "0.5599867", "0.55876", "0.55551654", "0.54261523", "0.54143274", "0.54062307", "0.5399775", "0.5396218", "0.5388583", "0.53567606", "0.52986205", "0.52962285", "0.52947336", "0.52704...
0.0
-1