query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Add headers to both force latest IE rendering engine or Chrome Frame, and also to cache the rendered page for 10 minutes.
def add_header(r): r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate" r.headers["Pragma"] = "no-cache" r.headers["Expires"] = "0" r.headers['Cache-Control'] = 'public, max-age=0' return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, max-age=60'\n return response", "def add_header(response):\n response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n response.headers['Cache-Control'] = 'public, m...
[ "0.82239413", "0.8223081", "0.8223081", "0.8223081", "0.8223081", "0.8196386", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", "0.8163827", ...
0.71290123
63
Returns a list of the currently connected playes (on the MC server). First tries to hit the cache to see if this has been checked recently. If there is no cache entry, queries the Minecraft server's zombiepygman API to get the list of currently connected players.
def _get_connected_player_list(self): if not zpgapi.is_zgp_api_enabled(): # API is not configured, skip this. return [] cache_key = 'api_connected_players' cache_val = cache.get(cache_key) if cache_val != None: return cache_val api = zpgapi.get_zpg_api_iface() try: api_response = api.cmd_list_connected() cache_val = api_response['player_list'] except urllib2.URLError: # Error with zombiepygman. # This will get cached, but that's OK. It will prevent request # pileup on the gunicorn workers. cache_val = [] cache.set(cache_key, cache_val, 60) return cache_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_players(self):\n return self.server.status().players.online", "def get_players():\n nfl_players = redis_cache('nfl_players_key', NFL_Player_2015.query.all)\n return nfl_players", "def players(self):\n return self.currents.player", "def get_players(self):\n\n # Append the cu...
[ "0.7143071", "0.68768233", "0.6365127", "0.635414", "0.62231076", "0.60774004", "0.6067745", "0.60203606", "0.6001943", "0.5917321", "0.5900113", "0.58974326", "0.5869177", "0.5867893", "0.5851105", "0.58382463", "0.5821852", "0.57409716", "0.5719336", "0.57017183", "0.565462...
0.82015663
0
Ensure the value of 'done' is set to False when creating an item
def test_done_default_value_is_False(self): item = Item(name = "A test item") self.assertEqual(item.name, "A test item") self.assertFalse(item.done)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_done_value_can_be_set_to_True(self):\n item = Item(name = \"A test item\", done = True)\n self.assertEqual(item.name, \"A test item\")\n self.assertTrue(item.done)", "def test_create(self):\n Todo = self.env[\"todo.task\"]\n task = Todo.create({'name': 'Test Task'})\n ...
[ "0.73595536", "0.69080955", "0.6256873", "0.6184523", "0.61320096", "0.60282576", "0.6025874", "0.59891725", "0.58720154", "0.5870808", "0.58685875", "0.5864126", "0.5850043", "0.5843042", "0.580223", "0.5788047", "0.5772935", "0.5769292", "0.5767891", "0.5756782", "0.5730310...
0.7237571
1
Ensure the value of 'done' is True when set to True when creating an item
def test_done_value_can_be_set_to_True(self): item = Item(name = "A test item", done = True) self.assertEqual(item.name, "A test item") self.assertTrue(item.done)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_done_default_value_is_False(self):\n item = Item(name = \"A test item\")\n self.assertEqual(item.name, \"A test item\")\n self.assertFalse(item.done)", "def test_create(self):\n Todo = self.env[\"todo.task\"]\n task = Todo.create({'name': 'Test Task'})\n self.as...
[ "0.7198816", "0.6800873", "0.65567577", "0.6417597", "0.6363705", "0.6359525", "0.6172014", "0.6137984", "0.61083496", "0.6047177", "0.60278106", "0.6007678", "0.59839237", "0.59442586", "0.590649", "0.5906427", "0.5846286", "0.5799823", "0.5794618", "0.5780171", "0.57660437"...
0.76738805
0
Ensure the string value of the object is equal to the item name
def test_object_name_is_equal_to_item_name(self): item = Item(name = "A test item") self.assertEqual(str(item), "A test item")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_str(self):\n item = self.item\n\n self.assertEqual(str(item), self.item_raw['name'])", "def _valid_object_with_name(ui_object):\n return ui_object.obj_name", "def test_name(self):\n self.assertTrue(type(x.name) == str)", "def test_values_single(self):\n input_item = self...
[ "0.72171146", "0.6880246", "0.6773977", "0.6383563", "0.6352", "0.6319917", "0.62903845", "0.6150476", "0.60850054", "0.60359854", "0.60222614", "0.5974848", "0.5964395", "0.59447414", "0.5942534", "0.59195846", "0.59191364", "0.5898834", "0.58966243", "0.5865687", "0.5844299...
0.7843017
0
Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky ifelse logic here.
def build_evaluator(cls, cfg, dataset_name, output_folder=None): if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type if evaluator_type == "sem_seg": return SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, num_classes=4, ignore_label=255 ) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() >= comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) if len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_s...
[ "0.71811223", "0.7178588", "0.7118122", "0.62564474", "0.6133015", "0.5892592", "0.5807464", "0.5648055", "0.5615826", "0.5607056", "0.55071527", "0.54952985", "0.5480512", "0.5452857", "0.54165035", "0.54085374", "0.5403301", "0.5399699", "0.5389954", "0.5385437", "0.5375982...
0.69981503
3
Create a postvalidator function that makes sure the value of this item is a key in the sibling dictionary 'sib_name'. Raises a ValueError if not. This generally assumes siblings[sib_name] is a required CategoryElement.
def is_sib_key(sib_name): def is_sib_key_val(siblings, value): if value not in siblings[sib_name].keys(): raise ValueError( "Must be a key of {}, but got {}" .format(sib_name, value)) return value return is_sib_key_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self):\n for search_tag_name in self.get_search_tag_names():\n search_tag_obj = Tag(search_tag_name)\n for search_tag_value in self.get_search_tag_values(search_tag_name):\n for new_tag_name in self.get_new_tag_names(search_tag_name, search_tag_value):\n ...
[ "0.45828247", "0.4572428", "0.45380762", "0.44724888", "0.42696497", "0.42584473", "0.42416134", "0.41420826", "0.41245428", "0.41224957", "0.4097658", "0.4080648", "0.40750405", "0.40694186", "0.40613383", "0.4061312", "0.40393326", "0.40243196", "0.3988627", "0.3969611", "0...
0.62593156
0
89 / 89 test cases passed.
def maxTurbulenceSize(self, arr: List[int]) -> int: if len(arr) == 1: return 1 ret = 1 tmp_ret = 0 last_flag = None for i in range(1, len(arr)): if arr[i] == arr[i - 1]: current_flag = None else: current_flag = arr[i] > arr[i - 1] if current_flag is None: ret = max(ret, tmp_ret) tmp_ret = 1 elif last_flag is None or last_flag == current_flag: ret = max(ret, tmp_ret) tmp_ret = 2 else: tmp_ret += 1 last_flag = current_flag return max(ret, tmp_ret)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_T01():", "def test_T4():", "def test_T4():", "def exercise_b2_106():\r\n pass", "def exercise_b2_113():\r\n pass", "def test_basic(self):\n self.assertEqual(solution(\"\"\"11111\n19991\n19191\n19991\n11111\"\"\"), 6)\n self.assertEqual(solution(\"\"\"5483143223\n2745854711\n5...
[ "0.6906367", "0.6610178", "0.6610178", "0.65972733", "0.6592675", "0.65818673", "0.6549294", "0.6506812", "0.64938533", "0.64341944", "0.6431647", "0.6431647", "0.6419195", "0.6403939", "0.6397478", "0.6384199", "0.63813823", "0.6361559", "0.6293006", "0.62877226", "0.625898"...
0.0
-1
Try find newer version for test. if next version has already been failed, stop further upgrades.
def increase_version(self): # type: (...) -> bool if self.type == RequirementType.FINAL_LATEST_VERSION: return False # get the latest version that may work if self.type == RequirementType.NOT_LATEST_VERSION: # get version between the current and last version_to_test = self.package.get_middle_version( self.version, self.error_version) if not version_to_test: self.type = RequirementType.FINAL_LATEST_VERSION return False else: version_to_test = self.package.last_version if version_to_test == self.version: # The latest version is already installed return False self.previous_version = self.version self.version = version_to_test return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upgrade_to_latest_but_same_version(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\...
[ "0.7359393", "0.7041746", "0.68207544", "0.6809215", "0.6583295", "0.6510883", "0.64602184", "0.6447396", "0.64244986", "0.6342098", "0.63395315", "0.63182235", "0.63094175", "0.63028187", "0.63012606", "0.6289545", "0.62604177", "0.6240322", "0.61509264", "0.61496556", "0.61...
0.6330563
11
get requirements file line.
def get_line(self): # type: () -> str line = "{}=={}".format(self.name, self.version) if self.type != RequirementType.LATEST_VERSION: line += ' # ' + TEMPLATES[self.type] if self.type == RequirementType.NOT_LATEST_VERSION: line = line.replace(r'(\S*)', self.error_version) return line + '\n'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_line(self, path, line):\n\t\tlines = self.find_source(path)\n\t\tif lines == None:\n\t\t\treturn None\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn lines[line - 1]\n\t\t\texcept IndexError:\n\t\t\t\treturn None", "def find_requirements():\n with open(\"requirements.txt\", 'r') as f:\n return f.read()....
[ "0.6740572", "0.6701391", "0.65815175", "0.64647526", "0.6415879", "0.6412485", "0.6349819", "0.63256943", "0.63117", "0.6291996", "0.62328476", "0.6201596", "0.6177119", "0.6160064", "0.6157574", "0.615133", "0.6138522", "0.6128042", "0.6090447", "0.6074949", "0.6072613", ...
0.6781809
0
Split line on text and comment
def split_line(self, line): # type: (str) -> tuple parts = [s.strip() for s in line.split('#', 1)] package = parts[0] comment = parts[1] if len(parts) >= 2 else '' return package, comment
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_comment(cls, code):\r\n if '#' not in code: return code\r\n #: Remove comments only (leave quoted strings as they are)\r\n subf = lambda m: '' if m.group(0)[0]=='#' else m.group(0)\r\n return re.sub(cls.re_pytokens, subf, code)", "def to_multi_line_comment(text: str) -> str:...
[ "0.6692685", "0.66081315", "0.65925145", "0.6545137", "0.6362264", "0.6226867", "0.62236226", "0.6117526", "0.6067819", "0.60001516", "0.59967846", "0.5966611", "0.59597796", "0.5954498", "0.59440887", "0.59440887", "0.5927107", "0.5925445", "0.59013635", "0.5877957", "0.5875...
0.6687648
1
Calculate cosine distance between two vector
def findCosineDistance(vector1, vector2): vec1 = vector1.flatten() vec2 = vector2.flatten() a = np.dot(vec1.T, vec2) b = np.dot(vec1.T, vec1) c = np.dot(vec2.T, vec2) return 1 - (a / (np.sqrt(b) * np.sqrt(c)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cosine_distance(u, v):\n #print u,v\n return numpy.dot(u, v) / (math.sqrt(numpy.dot(u, u)) * math.sqrt(numpy.dot(v, v)))", "def cosine_distance(u, v):\n return numpy.dot(u, v) / (math.sqrt(numpy.dot(u, u)) * math.sqrt(numpy.dot(v, v)))", "def cosine_similarity(v1: Vector, v2: Vector) -> float:\n ...
[ "0.8144758", "0.806103", "0.7972851", "0.79563916", "0.7951864", "0.77495205", "0.7739488", "0.7737394", "0.7714112", "0.7667365", "0.76573527", "0.7630341", "0.7616587", "0.76105106", "0.76048666", "0.7574436", "0.7551911", "0.7540636", "0.7515504", "0.7509719", "0.7507556",...
0.8303037
0
Verify the similarity of one vector to group vectors of one class
def CosineSimilarity(test_vec, source_vecs): cos_dist = 0 for source_vec in source_vecs: cos_dist += FacePredictor.findCosineDistance(test_vec, source_vec) return cos_dist / len(source_vecs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))", "def test_dice_similarity_compiled():\n vector1 = np.array([1, 1, 0, 0])\n vector2 = ...
[ "0.750954", "0.66709244", "0.65119016", "0.6507448", "0.65028244", "0.6483979", "0.6449786", "0.64489347", "0.6439888", "0.64329153", "0.6320735", "0.6308931", "0.63024855", "0.62732375", "0.6246762", "0.6246572", "0.6237279", "0.6201954", "0.61760354", "0.6173955", "0.617291...
0.55804986
100
Method to compile the BigQuery specific script execution command.
def generate_provider_specific_cmd_list(script, driver, output, error): cmd_list = [driver, FLAGS.bq_project_id, FLAGS.bq_dataset_id, script, output, error] return cmd_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _define_script_command(command_name,\n parent_shell,\n bootstrap_script,\n container_path,\n scripts_path,\n script):\n script_fragment = \"\\\"{}\\\"\".format(script) if script ...
[ "0.58411306", "0.5736632", "0.55719966", "0.5505636", "0.54719853", "0.5344112", "0.5325198", "0.5318884", "0.53028625", "0.5297254", "0.528932", "0.5273765", "0.5254984", "0.5248845", "0.5227913", "0.51987875", "0.5188278", "0.51837045", "0.5174086", "0.5140885", "0.5131505"...
0.0
-1
Add index operation with name to the operations given.
def add_index_operation(self, name, operations): if name not in self._index_operations: self._add_io(name, operations) else: raise AttributeError("An index operation with the name {} was already taken".format(name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_index(self, name, func):\n assert name not in self.indices\n info_name = 'index:%s:%s' % (self.info['name'], name)\n info = self.store._get_info(info_name, index_for=self.info['name'])\n index = Index(self, info, func)\n self.indices[name] = index\n if IndexKeyBuil...
[ "0.68858343", "0.6698154", "0.64274466", "0.6417586", "0.63482445", "0.6007961", "0.5947242", "0.5861968", "0.5853543", "0.57812166", "0.57532567", "0.57404816", "0.57012236", "0.5667653", "0.5642528", "0.5634883", "0.56331265", "0.5619371", "0.55785316", "0.5549118", "0.5546...
0.85664165
0
Return the offset of the param inside this parameterized object. This does not need to account for shaped parameters, as it basically just sums up the parameter sizes which come before param.
def _offset_for(self, param): if param.has_parent(): p = param._parent_._get_original(param) if p in self.parameters: return reduce(lambda a,b: a + b.size, self.parameters[:p._parent_index_], 0) return self._offset_for(param._parent_) + param._parent_._offset_for(param) return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def offset(self):\n return self.__offset", "def offset(self):\n return self.__offset", "def get_offset(self):\n return self.offset", "def wm_offset(self):\n return self.get_par(\"offset\")", "def offset(self):\n return self._offset", "def offset(self):\n return self....
[ "0.65088356", "0.65088356", "0.6396925", "0.63015145", "0.6291252", "0.6291252", "0.6291252", "0.6291252", "0.6291252", "0.6291252", "0.6291252", "0.62532693", "0.62532693", "0.62532693", "0.62496376", "0.62265295", "0.61739165", "0.61539537", "0.61539537", "0.61539537", "0.6...
0.79988617
1
get the raveled index for a param that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work
def _raveled_index_for(self, param): from ..param import ParamConcatenation if isinstance(param, ParamConcatenation): return np.hstack((self._raveled_index_for(p) for p in param.params)) return param._raveled_index() + self._offset_for(param)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _raveled_index_for(self, param):\n from .param import ParamConcatenation\n if isinstance(param, ParamConcatenation):\n return np.hstack((self._raveled_index_for(p) for p in param.params))\n return param._raveled_index() + self._offset_for(param)", "def _raveled_index_for_trans...
[ "0.71273017", "0.6010891", "0.59065056", "0.5804188", "0.58011395", "0.57862777", "0.57440937", "0.57440937", "0.57353306", "0.56626016", "0.56598043", "0.5625494", "0.5600237", "0.5588847", "0.5567432", "0.55587304", "0.55522966", "0.554781", "0.55181646", "0.5507306", "0.55...
0.71871
0
get the raveled index for a param for the transformed parameter array (optimizer array). that is an int array, containing the indexes for the flattened param inside this parameterized logic. !Warning! be sure to call this method on the highest parent of a hierarchy, as it uses the fixes to do its work. If you do not know what you are doing, do not use this method, it will have unexpected returns!
def _raveled_index_for_transformed(self, param): ravi = self._raveled_index_for(param) if self._has_fixes(): fixes = self._fixes_ ### Transformed indices, handling the offsets of previous fixes transformed = (np.r_[:self.size] - (~fixes).cumsum()) return transformed[ravi[fixes[ravi]]] else: return ravi
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _raveled_index_for(self, param):\n from ..param import ParamConcatenation\n if isinstance(param, ParamConcatenation):\n return np.hstack((self._raveled_index_for(p) for p in param.params))\n return param._raveled_index() + self._offset_for(param)", "def _raveled_index_for(self...
[ "0.73500043", "0.72873694", "0.5873878", "0.5873878", "0.58434486", "0.56710553", "0.5656544", "0.5656544", "0.5554051", "0.554955", "0.5519457", "0.5428711", "0.536669", "0.5312925", "0.5258314", "0.5250333", "0.51969874", "0.51883346", "0.5186911", "0.51863664", "0.5186337"...
0.69185156
2
Flattened array of ints, specifying the index of this object. This has to account for shaped parameters!
def _raveled_index(self): return np.r_[:self.size]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _idxs_postformat_array(self):\n self.idxs = np.array(self.idxs)", "def flatten_idx(idx, axis=-1):\n idx = numpy.asanyarray(idx)\n if not idx.dtype.kind in ('i', 'u'):\n idx = idx.astype(int)\n preshape = idx.shape[:axis]\n postshape = idx.shape[axis:]\n stride = int(numpy.product...
[ "0.65175533", "0.62058824", "0.5974461", "0.59025675", "0.58936995", "0.58421", "0.58367753", "0.57612103", "0.5670626", "0.5655063", "0.5648909", "0.56180966", "0.5589195", "0.55857176", "0.55857176", "0.5580362", "0.55702096", "0.5560747", "0.544082", "0.5435741", "0.541882...
0.53458685
27
Helper preventing copy code. This adds the given what (transformation, prior etc) to parameter index operations which. reconstrained are reconstrained indices. warn when reconstraining parameters if warning is True.
def _add_to_index_operations(self, which, reconstrained, what, warning): if warning and reconstrained.size > 0: # TODO: figure out which parameters have changed and only print those print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name)) index = self._raveled_index() which.add(what, index) return index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def warn_inplace(exc, nav, repl_pairs, local_opt):\r\n if isinstance(exc, InconsistencyError):\r\n return\r\n return NavigatorOptimizer.warn(exc, nav, repl_pairs, local_opt)", "def ensure_default_constraints(self,warn=False):\n positive_strings = ['variance','lengthscale', 'precis...
[ "0.6421251", "0.57638377", "0.53960013", "0.5364344", "0.52853006", "0.52772886", "0.52772886", "0.52772886", "0.52637357", "0.52537453", "0.52390754", "0.5176815", "0.51558375", "0.514963", "0.51430506", "0.5140084", "0.5106313", "0.5041951", "0.49778527", "0.4977233", "0.49...
0.71344423
1
Helper preventing copy code. Remove given what (transform prior etc) from which param index ops.
def _remove_from_index_operations(self, which, transforms): if len(transforms) == 0: transforms = which.properties() removed = np.empty((0,), dtype=int) for t in list(transforms): unconstrained = which.remove(t, self._raveled_index()) removed = np.union1d(removed, unconstrained) if t is __fixed__: self._highest_parent_._set_unfixed(self, unconstrained) return removed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _remove_operator(self, operator):", "def remove_extra_index_from_context_actions(context_action_dict):\n keys_to_keep = {'initial_value', 'replacement_value'}\n for question in context_action_dict:\n for obj_dct in context_action_dict[question]:\n total_keys = set(obj_dct.keys())\n ...
[ "0.5864179", "0.57993364", "0.5767263", "0.5759645", "0.57012475", "0.5697558", "0.5676129", "0.56178457", "0.559176", "0.55809194", "0.5571276", "0.5533166", "0.5462391", "0.5455604", "0.54388994", "0.54100037", "0.539974", "0.5399218", "0.53672886", "0.53319484", "0.5327874...
0.65478945
1
Emit a JSON representation of a given row
def format(self, row): return json.dumps(row.print_fields)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, row: Optional[Any] = None):\n self.fout.write('{}\\n'.format(json.dumps(row, cls=self.encoder)))", "def row_to_json(row: sqlite3.Row) -> str:\n d = {}\n for key in row.keys():\n d[key] = row[key]\n\n return json.dumps(d)", "def __data_row_to_json(self, row):\n raw_...
[ "0.745525", "0.7241681", "0.72320414", "0.6897235", "0.68846", "0.6736316", "0.65954936", "0.65526325", "0.6160118", "0.61541754", "0.6141491", "0.61156374", "0.6115557", "0.6070374", "0.60693794", "0.6057097", "0.5938166", "0.5934188", "0.58886075", "0.58313084", "0.5826782"...
0.7718322
0
return a header for the row
def header(self, fields): return fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _horizontal_header(self):\n return self.header()", "def _horizontal_header(self):\n return self.header()", "def __get_header_tags(self):\n tag = \"<th>{}</th>\"\n\n return (tag * len(self.__rows)).format(*self.__rows)", "def _generateRowHeader(self, obj, **args):\n resu...
[ "0.7744287", "0.7744287", "0.7658061", "0.7620999", "0.75094855", "0.74332565", "0.73744935", "0.73059195", "0.7254804", "0.7210276", "0.7183111", "0.7131767", "0.71054065", "0.7100797", "0.7089827", "0.69185275", "0.69134706", "0.6910354", "0.6902813", "0.6865558", "0.685388...
0.62879777
73
Populate the class with the json info
def populate(self, fid1, fid2): self.input1 = json.load(fid1) self.input2 = json.load(fid2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, json):\n\n self.id = json[\"id\"]\n self.alternateId = json[\"alternateId\"]\n self.name = json[\"name\"]\n\n if \"description\" in json:\n self.description = json[\"description\"]\n\n if \"episodeCount\" in json:\n self.episodeCount = jso...
[ "0.7457732", "0.74043804", "0.7343072", "0.71182615", "0.7032978", "0.6789153", "0.66915345", "0.66697764", "0.66629", "0.66505814", "0.66464657", "0.6644336", "0.6605952", "0.66035146", "0.65517664", "0.65442574", "0.6533766", "0.65309536", "0.65301555", "0.65257514", "0.652...
0.0
-1
Creates a dictonary of nodes listed by currie id from answers 1 and 2
def make_node_dict(self): if self.input1 is None or self.input2 is None: raise Exception("Missing input: please run the populate() method first") self.node_dict1 = {} for node in self.input1['knowledge_graph']['nodes']: self.node_dict1[node['id']] = node self.node_dict2 = {} for node in self.input2['knowledge_graph']['nodes']: self.node_dict2[node['id']] = node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_nodes_by_id(ntwrk, nodeid):\r\n return {k: v for el in ntwrk\r\n for k, v in el.items() if k == nodeid}", "def node_diff(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n if s...
[ "0.5874153", "0.5803899", "0.5788026", "0.5769955", "0.56848216", "0.56081706", "0.5563815", "0.55347794", "0.5533629", "0.55147934", "0.5420318", "0.53809404", "0.53226113", "0.53205335", "0.52443105", "0.52339405", "0.5148043", "0.5130552", "0.5121005", "0.5114256", "0.5098...
0.60742897
0
Runs through all of the nodes in the json responses storing the intersection and set differences into a dictonary organized by tuples of node ids or the tuple (1, 1) for all nodes.
def node_diff(self): if self.input1 is None or self.input2 is None: raise Exception("Missing input: please run the populate() method first") if self.node_dict1 is None or self.node_dict2 is None: self.make_node_dict() # Initialize dictonaries to keep track of the nodes in respnse 1 and response 2 g1={} g2={} # Set to keep track of the union of all curie ids curie_set = set() for curie in self.node_dict1.keys(): g1[curie] = {} # intersection is only in the g1 dictionary g1[curie]['intersection'] = set() # node section keeps track of node ids associated with each node i.e. "n0" g1[curie]['node'] = set() curie_set.add(curie) for curie in self.node_dict2.keys(): g2[curie] = {} # node section keeps track of node ids associated with each node i.e. "n0" g2[curie]['node'] = set() curie_set.add(curie) node_names1 = [] node_names2 = [] # extract all node ids (i.e. "n0","n1",ect...) if len(self.input1['question_graph']['nodes'])>0: if 'id' in self.input1['question_graph']['nodes'][0]: node_names1 = [x['id'] for x in self.input1['question_graph']['nodes']] elif 'node_id' in self.input1['question_graph']['nodes'][0]: node_names1 = [x['node_id'] for x in self.input1['question_graph']['nodes']] if len(self.input2['question_graph']['nodes'])>0: if 'id' in self.input2['question_graph']['nodes'][0]: node_names2 = [x['id'] for x in self.input2['question_graph']['nodes']] elif 'node_id' in self.input2['question_graph']['nodes'][0]: node_names2 = [x['node_id'] for x in self.input2['question_graph']['nodes']] # initialize the result dictonary diff_dict = {} diff_dict["-1|-1"] = {'intersection':[],'g1-g2':[],'g2-g1':[]} # initialize node id tuple keys for id1 in node_names1: for id2 in node_names2: diff_dict[id1+"|"+id2] = {'intersection':[],'g1-g2':[],'g2-g1':[]} # iterate through answers for answer1 in self.input1['answers']: for answer2 in self.input2['answers']: for id1 in answer1['node_bindings'].keys(): # This is to handle cases where answer node id has a list or a string if 
isinstance(answer1['node_bindings'][id1], str): bindings1 = [answer1['node_bindings'][id1]] elif isinstance(answer1['node_bindings'][id1], list): bindings1 = answer1['node_bindings'][id1] for curie1 in bindings1: # store node id g1[curie1]['node'].add(id1) for id2 in answer2['node_bindings'].keys(): # This is to handle cases where answer node id has a list or a string if isinstance(answer2['node_bindings'][id2], str): bindings2 = [answer2['node_bindings'][id2]] elif isinstance(answer2['node_bindings'][id2], list): bindings2 = answer2['node_bindings'][id2] for curie2 in bindings2: # store node id g2[curie2]['node'].add(id2) if curie1 == curie2: # stor intersection tuple g1[curie1]['intersection'].add(id1+"|"+id2) # iterate through all curies for curie in curie_set: # check if curie is from answer 1 if curie in g1.keys(): # check if in intersection if len(g1[curie]['intersection'])>0: diff_dict["-1|-1"]['intersection'] += [self.node_dict1[curie]] for id1 in node_names1: for id2 in node_names2: node_tuple = id1+"|"+id2 if id1 in g1[curie]['node'] and id2 in g2[curie]['node']: diff_dict[node_tuple]['intersection'] += [self.node_dict1[curie]] elif id1 in g1[curie]['node']: diff_dict[node_tuple]['g1-g2'] += [self.node_dict1[curie]] elif id2 in g2[curie]['node']: diff_dict[node_tuple]['g2-g1'] += [self.node_dict1[curie]] # If not in intersection store in g1-g2 else: diff_dict["-1|-1"]['g1-g2'] += [self.node_dict1[curie]] for id1 in g1[curie]['node']: # iterate through all answer 2 ids for id2 in node_names2: diff_dict[id1+"|"+id2]['g1-g2'] += [self.node_dict1[curie]] # if not in g1 but in g2 then in g2-g1 elif curie in g2.keys(): diff_dict["-1|-1"]['g2-g1'] += [self.node_dict2[curie]] for id2 in g2[curie]['node']: # iterate through all answer 1 ids for id1 in node_names1: diff_dict[id1+"|"+id2]['g2-g1'] += [self.node_dict2[curie]] return diff_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dict_sets_intersection_test(self, data):\n\n data_info = self.get_data_info(data)\n finished = []\n\n for part in data:\n for union_part in data:\n if part != union_part and union_part not in finished:\n data[part].intersection(data[union_part])...
[ "0.57611847", "0.57074016", "0.5648599", "0.5608913", "0.55996966", "0.54121363", "0.539312", "0.5247284", "0.5185026", "0.51842594", "0.51647484", "0.51572686", "0.51484066", "0.5144395", "0.5093303", "0.5067902", "0.5056289", "0.5055382", "0.5027136", "0.5016611", "0.500497...
0.6736763
0
Create matrix associated with kernel interpolation.
def interp_matrix(qpnts, spnts, npgrid, nsamp, deg_max):
    """Build the kernel-interpolation matrix A[i, j] = K(s_i . q_j).

    Parameters
    ----------
    qpnts : array, shape (npgrid, 3)
        Reconstruction grid points (presumably unit vectors -- TODO confirm).
    spnts : array, shape (nsamp, 3)
        Sample directions (presumably unit vectors -- TODO confirm).
    npgrid, nsamp : int
        Number of grid points and sample points.
    deg_max : int
        Maximum spherical-harmonic degree passed to the kernel.

    Returns
    -------
    ndarray, shape (nsamp, npgrid)
        Matrix of inverse Funk-Radon kernel evaluations.
    """
    A = np.zeros((nsamp, npgrid))
    # xrange is Python 2 only -- use range for Python 3 compatibility.
    for i in range(nsamp):
        for j in range(npgrid):
            cosTheta = np.dot(spnts[i], qpnts[j])
            # Guard against rounding pushing |cos| slightly above 1.
            if abs(cosTheta) > 1:
                cosTheta = np.sign(cosTheta)
            A[i, j] = inv_funk_radon_kernel(cosTheta, deg_max)
    return A
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_interpolation_matrix(self, X):\n G = np.zeros((len(X), self.hidden_shape))\n for data_point_arg, data_point in enumerate(X):\n for center_arg, center in enumerate(self.centers):\n if self.mode == 'gaus':\n G[\n data_po...
[ "0.6601181", "0.5991661", "0.59131616", "0.5783467", "0.5772154", "0.5769376", "0.5727504", "0.571512", "0.5695456", "0.56801045", "0.5649581", "0.56263125", "0.56191933", "0.55479634", "0.54599106", "0.54450864", "0.5416208", "0.54120284", "0.54111737", "0.5408678", "0.53303...
0.5754054
6
Create matrix associated with inversion based on Aganj et al. formalism.
def interp_matrix_new(qpnts, spnts, npgrid, nsamp, deg_max):
    """Build the inversion matrix (Aganj et al. formalism).

    Same layout as `interp_matrix`, but each entry uses the combined
    inverse Funk-Radon / inverse spherical-Laplacian kernel.

    Returns
    -------
    ndarray, shape (nsamp, npgrid)
    """
    A = np.zeros((nsamp, npgrid))
    # xrange is Python 2 only -- use range for Python 3 compatibility.
    for i in range(nsamp):
        for j in range(npgrid):
            cosTheta = np.dot(spnts[i], qpnts[j])
            # Clamp |cos| to 1 to survive floating-point round-off.
            if abs(cosTheta) > 1:
                cosTheta = np.sign(cosTheta)
            A[i, j] = inv_funk_radon_even_kernel(cosTheta, deg_max)
    return A
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n ...
[ "0.6711361", "0.6592599", "0.6546603", "0.64961314", "0.6466997", "0.64629614", "0.6427826", "0.6343991", "0.6329846", "0.6236044", "0.62210953", "0.621212", "0.62022287", "0.6199758", "0.6198103", "0.6197292", "0.6197292", "0.61816674", "0.615353", "0.61416143", "0.6140042",...
0.0
-1
Create random signal on the sphere
def rand_sig(u, b, n, theta):
    """Synthesize a multi-tensor diffusion signal at direction `u`.

    Parameters
    ----------
    u : array-like, shape (3,)
        Unit gradient direction (assumed normalized -- TODO confirm).
    b : float
        b-value of the acquisition.
    n : int
        Number of fiber compartments (1, 2 or 3).
    theta : float
        Rotation angle (radians) applied to the tensor(s).

    Returns
    -------
    (angle, s) : tuple
        `angle` is the crossing angle in degrees, `s` the simulated signal.
    """
    # Locally used names
    from numpy import dot, exp
    # Diffusion tensor parameters -- diffusion along x-axis
    lambda1 = 1700e-6
    lambda2 = 300e-6
    lambda3 = 300e-6
    # diagonal diffusion tensor for "prolate white matter"
    D1 = np.diag([lambda1, lambda2, lambda3])
    D2 = D1
    D3 = D1
    # rotation of diffusion tensor
    rotationMatrix = rotation3Dy(theta)
    D1 = dot(dot(rotationMatrix,D1),rotationMatrix.T)
    # NOTE(review): the line below is commented out, so D2 is rotated by the
    # SAME matrix as D1 -- for n==2 both compartments coincide. Presumably the
    # second tensor was meant to get an independent rotation; verify intent.
    # rotationMatrix = rotation3Dz(-theta)
    D2 = dot(dot(rotationMatrix,D2),rotationMatrix.T)
    # Crossing angle (degrees) derived from theta.
    angle = np.arccos(np.cos(theta)*np.cos(theta))*180/np.pi
    # XXX - check with cory these semantics
    if n==1:
        # Single mode
        s = exp(-b * dot(u, dot(D1,u)) )
    elif n==2:
        # Equal-weight two-compartment mixture.
        s = 0.5 * (exp(-b * dot(u, dot(D1,u)) ) + exp(-b * dot(u, dot(D2,u)) ) )
    elif n==3:
        # Equal-weight three-compartment mixture (D3 is unrotated).
        s = (1.0/3) * (exp(-b * dot(u, dot(D1,u)) ) + exp(-b * dot(u, dot(D2,u)) ) + exp(-b * dot(u, dot(D3,u)) ) )
    return (angle,s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rndSphere():\n sph = [0,0,0]\n \n sph[2] = random.uniform(-1.0,1.0)\n z2 = math.sqrt(1.0 - sph[2]*sph[2])\n phi = (2. * math.pi) * random.random()\n sph[0] = z2 * math.cos(phi)\n sph[1] = z2 * math.sin(phi)\n \n return sph", "def randomPointOnSphere(r):\n x = np.random.no...
[ "0.7532667", "0.7204255", "0.71878636", "0.7109239", "0.70131403", "0.69970477", "0.6976129", "0.69127107", "0.69005173", "0.6890995", "0.6522725", "0.64420277", "0.6441751", "0.64262915", "0.6411878", "0.6391545", "0.6374727", "0.6287322", "0.6277766", "0.6242343", "0.623808...
0.0
-1
Reproducing kernel Calculate the inverse FunkRadon transform of reproducing kernel for the space of spherical harmonics of maximum degree N.
def inv_funk_radon_kernel(mu, N):
    """Inverse Funk-Radon transform of the reproducing kernel for the even
    spherical-harmonic subspace up to degree N, evaluated at cos-angle `mu`.
    """
    mu = np.clip(mu, -1, 1)
    P = legp(mu, N)       # Legendre polynomials P_0..P_N at mu
    P0 = legp(0, N)       # same polynomials evaluated at 0
    total = 0.0
    for deg in range(0, N + 1, 2):
        total += (2 * deg + 1) * P[deg] / P0[deg]
    return total / (8 * np.pi)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inv_funk_radon_even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n\n coefs_num = 2*np.arange(0, N+1) + 1\n coefs_den = np.arange(2,N+1,2) * (np.arange(2,N+1,2) + 1)\n\n ker = coe...
[ "0.7213641", "0.63983333", "0.61867654", "0.61385816", "0.61052585", "0.60678124", "0.6047397", "0.59877527", "0.598379", "0.59239495", "0.584012", "0.5723425", "0.57071584", "0.5696312", "0.55959344", "0.556116", "0.5512384", "0.5488459", "0.5458828", "0.5453336", "0.543654"...
0.7255298
0
Reproducing kernel Calculate inverse FunkRadon transform and inverse spherical Laplacian of reproducing kernel for even degree subspace of spherical harmonics of maximum degree N, i.e., calculates H(\mu) = \Delta^1 G^1 K_e(\mu), where \Delta is the spherical Laplacian and G is the FunkRadon transporm. The calculation is done in spectral space.
def inv_funk_radon_even_kernel(mu, N):
    """H(mu): inverse Funk-Radon transform combined with the inverse spherical
    Laplacian of the even-subspace reproducing kernel, max degree N.
    Computed in spectral (Legendre) space.
    """
    mu = np.clip(mu, -1, 1)
    P = legp(mu, N)     # Legendre polynomials at mu
    P0 = legp(0, N)     # Legendre polynomials at 0
    total = 0.0
    # Even degrees >= 2; degree 0 is annihilated by the Laplacian.
    for deg in range(2, N + 1, 2):
        total += (2 * deg + 1) * P[deg] / (P0[deg] * deg * (deg + 1))
    return total / (8.0 * np.pi * np.pi)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inv_funk_radon_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n coefs = 2*np.arange(0, N+1, 2) + 1\n ker = coefs*legPolys[::2]/p_at_zero[::2]\n return ker.sum() / (8*np.pi)", "de...
[ "0.739296", "0.6562701", "0.65557134", "0.65486634", "0.63957083", "0.6381057", "0.6323597", "0.62995636", "0.6100595", "0.59519017", "0.59446114", "0.59294933", "0.59197277", "0.5910358", "0.5873097", "0.58403426", "0.5745301", "0.5744658", "0.5742014", "0.57419306", "0.5586...
0.7455563
0
Reproducing kernel Calculate of reproducing kernel for even subspace of spherical harmonics of maximum degree N.
def even_kernel(mu, N):
    """Reproducing kernel of the even spherical-harmonic subspace
    (degrees 0, 2, ..., N) evaluated at cos-angle `mu`.
    """
    mu = np.clip(mu, -1, 1)
    P = legp(mu, N)
    total = 0.0
    for deg in range(0, N + 1, 2):
        total += (2 * deg + 1) * P[deg]
    return total / (4.0 * np.pi)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def even_kernel_der(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n #Derivatives of Legendre polynomials\n DlegPolys = legp_der(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*DlegPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def inv_funk_radon...
[ "0.6827804", "0.67321193", "0.67197824", "0.65318835", "0.6531037", "0.6433048", "0.62198204", "0.6084965", "0.60187733", "0.60123444", "0.59886587", "0.59792995", "0.5959411", "0.59217125", "0.5896507", "0.58899844", "0.5817358", "0.5794526", "0.5773375", "0.5705847", "0.568...
0.7083843
0
Derivative of reproducing kernel on even subspaces of maximum degree N.
def even_kernel_der(mu, N):
    """Derivative (with respect to mu) of the even-subspace reproducing
    kernel of maximum degree N.
    """
    mu = np.clip(mu, -1, 1)
    dP = legp_der(mu, N)   # derivatives of Legendre polynomials at mu
    total = 0.0
    for deg in range(0, N + 1, 2):
        total += (2 * deg + 1) * dP[deg]
    return total / (4.0 * np.pi)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n\n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*legPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def compute_gradient_kernel_respect...
[ "0.63502026", "0.60510343", "0.5926386", "0.59101856", "0.5898114", "0.56899774", "0.56315917", "0.5616251", "0.558586", "0.55735755", "0.55652493", "0.55386996", "0.5501476", "0.54612076", "0.54292554", "0.54179746", "0.54039854", "0.5398605", "0.5376709", "0.53656363", "0.5...
0.66818523
0
Reproducing kernel Calculate of reproducing kernel for subspace of spherical harmonics of maximum degree N.
def kernel(mu, N):
    """Reproducing kernel for the full spherical-harmonic subspace of
    maximum degree N, evaluated at cos-angle `mu`.
    """
    mu = np.clip(mu, -1, 1)
    P = legp(mu, N)
    total = 0.0
    for deg in range(N + 1):
        total += (2 * deg + 1) * P[deg]
    return total / (4.0 * np.pi)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_...
[ "0.6673899", "0.66569775", "0.6480273", "0.6411122", "0.62793094", "0.626478", "0.60912395", "0.6079251", "0.5977385", "0.5952887", "0.5948711", "0.59083223", "0.58799875", "0.58414066", "0.58391666", "0.5796582", "0.56330234", "0.5628569", "0.5627499", "0.55669326", "0.55633...
0.6426333
3
Given the coefficients, evaluate model at a specific direction omega
def even_pODF(omega, qpoints, c, N):
    """Evaluate the kernel expansion sum_i c[i] * K_e(omega . q_i) at the
    direction `omega` given coefficients `c` and kernel centers `qpoints`.
    """
    npts = qpoints.shape[0]
    total = 0.0
    for i in range(npts):
        mu = np.clip(np.dot(omega, qpoints[i, :]), -1.0, 1.0)
        total += c[i] * even_kernel(mu, N)
    return total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def omega(self):\n return self._data.train_X @ self._thetas", "def solve_model():\n from scipy.integrate import ode\n # Initialise constants and state variables\n (init_states, constants) = initConsts()\n\n # Set timespan to solve over\n voi = linspace(0, 100, 5000)\n\n # Construct ODE o...
[ "0.5641719", "0.5596931", "0.5473129", "0.54537827", "0.5378322", "0.53513557", "0.528142", "0.5268483", "0.5266664", "0.5229376", "0.521022", "0.5209983", "0.52085", "0.51614934", "0.5149023", "0.50841874", "0.5070143", "0.50686336", "0.5039311", "0.5032286", "0.5028618", ...
0.0
-1
Given the coefficients, evaluate model at a specific direction (theta,phi)
def even_pODF_opt(angles, *args):  # qpoints, c, N):
    """Negative scaled pODF at spherical direction (theta, phi).

    Packaged as an objective for scipy-style minimizers: `args` carries
    (qpoints, c, N); minimizing this maximizes the pODF.
    """
    qpoints, c, N = args[0], args[1], args[2]
    theta, phi = angles[0], angles[1]
    # Unit direction vector from the spherical angles.
    omega = np.array([np.sin(theta) * np.cos(phi),
                      np.sin(theta) * np.sin(phi),
                      np.cos(theta)])
    total = 0.0
    for i in range(qpoints.shape[0]):
        mu = np.clip(np.dot(omega, qpoints[i, :]), -1.0, 1.0)
        total += c[i] * even_kernel(mu, N)
    return -(N + 1) ** 2 * total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval(self, theta, phi):\n required_fields = [\"coeff\", \"order\"]\n for field in required_fields:\n if field not in self.linear_fit.keys():\n raise ValueError(\"It looks like \"\n \"interface_energy_poly_expansion \"\n ...
[ "0.60838443", "0.5885254", "0.5861287", "0.5416153", "0.52906", "0.5278243", "0.52711785", "0.518602", "0.5174683", "0.51556545", "0.51364434", "0.51288843", "0.5110274", "0.5109226", "0.5090163", "0.5054213", "0.5048513", "0.50369865", "0.5005936", "0.49957308", "0.4991553",...
0.0
-1
Given the coefficients, evaluate gradient of model at a specific direction (theta,phi) returns 2x1 gradient
def even_pODF_opt_grad(angles, *args):
    """Gradient of `even_pODF_opt` with respect to (theta, phi).

    Parameters
    ----------
    angles : sequence of two floats
        (theta, phi) spherical angles.
    *args : (qpoints, c, N)
        Kernel centers, coefficients, and maximum degree.

    Returns
    -------
    ndarray, shape (2,)
        Negated, (N+1)^2-scaled partial derivatives [d/dtheta, d/dphi].
    """
    qpoints = args[0]
    c = args[1]
    N = args[2]
    n,m = qpoints.shape
    theta,phi = angles[0], angles[1]
    # Direction vector for the current angles.
    omega = np.array([np.sin(theta)*np.cos(phi),np.sin(theta)*np.sin(phi),np.cos(theta)])
    #Partial in theta direction
    sum = 0.0
    for i in range(n):
        mu = np.dot(omega,qpoints[i,:])
        mu = np.clip(mu, -1.0, 1.0)
        # Spherical coordinates of the i-th kernel center.
        r_i, theta_i, phi_i = car2sph(qpoints[i,0],qpoints[i,1],qpoints[i,2])
        # NOTE(review): d(mu)/d(theta) should be
        # cos(theta)*sin(theta_i)*cos(phi-phi_i) - sin(theta)*cos(theta_i);
        # the first term below uses cos(theta) where sin(theta) is expected --
        # verify against the derivation before relying on this gradient.
        sum += c[i]*even_kernel_der(mu, N)*(-np.cos(theta_i)*np.cos(theta) + np.cos(phi - phi_i)*np.cos(theta)*np.sin(theta_i))
    p_theta = sum
    #Partial in phi direction
    sum = 0.0
    for i in range(n):
        mu = np.dot(omega,qpoints[i,:])
        mu = np.clip(mu, -1.0, 1.0)
        r_i, theta_i, phi_i = car2sph(qpoints[i,0],qpoints[i,1],qpoints[i,2])
        # d(mu)/d(phi) = -sin(theta)*sin(theta_i)*sin(phi-phi_i); matches below.
        sum += c[i]*even_kernel_der(mu, N)*( -np.sin(phi - phi_i)*np.sin(theta)*np.sin(theta_i) )
    p_phi = sum
    # Same -(N+1)^2 scaling as even_pODF_opt so the gradient is consistent.
    return -(N + 1)**2 * np.array([p_theta,p_phi])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gradient(theta, x, y):\n m = len(y)\n n = len(theta)\n z = theta.dot(x.T)\n grad = np.zeros(n)\n for i in xrange(m):\n grad += (g(z[i]) - y[i]) * x[i]\n return 1. / m * grad", "def compute_gradient(theta, X, y):\n m = X.shape[0]\n grad_theta = np.dot(X.transpose(), (np.dot(X, t...
[ "0.73058945", "0.7201665", "0.7106998", "0.70931894", "0.70859104", "0.70620006", "0.7045631", "0.6992574", "0.6935396", "0.67486787", "0.67435336", "0.67086285", "0.67086285", "0.6706915", "0.67043006", "0.6637536", "0.6625402", "0.6624418", "0.6577811", "0.65765494", "0.657...
0.0
-1
Returns truncated iterated logarithm y = log( log(x) ) where if x<delta, x = delta and if 1delta < x, x = 1delta.
def exp_integral(x):
    """Return -Ein(x) = -gamma - E1(x) - log(x) for x > 0.

    E1 is the exponential integral of order 1.  SciPy's signature is
    ``expn(n, x)`` (order first); the original call ``expn(x, 1)`` had the
    arguments swapped and only agreed with E1 by coincidence at x == 1.
    """
    gamma = 0.577215665  # Euler-Mascheroni constant
    return (-gamma - expn(1, x) - np.log(x))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ilog(x,delta):\n if(delta < x and x < 1.0 - delta):\n return np.log( -np.log(x) )\n elif(x < delta):\n return np.log( -np.log(delta) )\n else: \n return np.log( -np.log(1.0 - delta) )", "def logit(x: torch.Tensor, eps=1e-5) -> torch.Tensor:\n x = torch.clamp(x, eps, 1.0 - eps...
[ "0.8148231", "0.7274531", "0.7241603", "0.7214562", "0.7204403", "0.7167102", "0.71177113", "0.6768097", "0.669359", "0.6662506", "0.66573983", "0.66425556", "0.66316724", "0.66110545", "0.6557691", "0.6542005", "0.6526384", "0.6520332", "0.6498638", "0.6453334", "0.6449421",...
0.0
-1
Returns truncated iterated logarithm y = log( log(x) ) where if x<delta, x = delta and if 1delta < x, x = 1delta.
def ilog(x, delta):
    """Truncated iterated logarithm log(-log(x)).

    The argument is clamped so that x < delta evaluates at delta and
    x outside (delta, 1 - delta) on the high side (including the exact
    boundaries) evaluates at 1 - delta, matching the original branching.
    """
    if delta < x and x < 1.0 - delta:
        clamped = x
    elif x < delta:
        clamped = delta
    else:
        clamped = 1.0 - delta
    return np.log(-np.log(clamped))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logit(x: torch.Tensor, eps=1e-5) -> torch.Tensor:\n x = torch.clamp(x, eps, 1.0 - eps)\n return torch.log(x / (1.0 - x))", "def safelog(x):\n #return np.log(x)\n return np.log(np.clip(x,floor,np.inf))", "def diff_log(x):\n \n return np.diff(np.log(x)),np.log(x)[0]", "def diff_log(x):\n\n ...
[ "0.72744215", "0.7240807", "0.7214393", "0.72042274", "0.716658", "0.711701", "0.6767486", "0.6693635", "0.6662013", "0.6657874", "0.66419184", "0.66316617", "0.66109407", "0.6556733", "0.6541151", "0.65251803", "0.6519674", "0.6498009", "0.645293", "0.6449499", "0.64343315",...
0.8147607
0
Create a 3D rotation matrix for rotation about xaxis. (1 0 0 ) R(theta) = (0 cos(x) sin(x)) (0 sin(x) cos(x))
def rotation3Dx(theta):
    """Rotation matrix about the x-axis:
        (1    0       0   )
        (0  cos(t)  sin(t))
        (0 -sin(t)  cos(t))
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[1.0, 0.0, 0.0],
                     [0.0,   c,   s],
                     [0.0,  -s,   c]])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_rotate_3d_x(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_x = -deg * pi/180\n c_x = cos(rad_x)\n s_x = sin(rad_x)\n return np.matrix([[1, 0, 0], [0, c_x, -s_x], [0, s_x, c_x]])", "def rotation3D_x(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle...
[ "0.80429703", "0.77990216", "0.77074045", "0.76701725", "0.7432882", "0.7401681", "0.73251915", "0.7267913", "0.71938413", "0.71556383", "0.70411855", "0.7036028", "0.70312065", "0.7028589", "0.7012928", "0.69417447", "0.692393", "0.68872285", "0.6844814", "0.6838681", "0.680...
0.78315115
1
Create a 3D rotation matrix for rotation about yaxis. ( cos(x) 0 sin(x)) R(theta) = ( 0 1 0 ) ( sin(x) 0 cos(x))
def rotation3Dy(theta):
    """Rotation matrix about the y-axis:
        (cos(t)  0 -sin(t))
        (  0     1    0   )
        (sin(t)  0  cos(t))
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[  c, 0.0,  -s],
                     [0.0, 1.0, 0.0],
                     [  s, 0.0,   c]])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_rotate_3d_y(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_y = -deg * pi/180\n c_y = cos(rad_y)\n s_y = sin(rad_y)\n return np.matrix([[c_y, 0, s_y], [0, 1, 0], [-s_y, 0, c_y]])", "def rotation3Dz(theta):\n rmat = np.zeros((3,3))\n rmat[0,0] = rmat[1,1] = np.cos(...
[ "0.7846392", "0.7652564", "0.75773746", "0.72624236", "0.72098386", "0.71632046", "0.711217", "0.707963", "0.7057155", "0.70560473", "0.701651", "0.6999073", "0.6945559", "0.69309556", "0.68679565", "0.6837997", "0.68178195", "0.6785411", "0.6732221", "0.67160934", "0.6714742...
0.751571
3
Create a 3D rotation matrix for rotation about zaxis. ( cos(x) sin(x) 0) R(theta) = (sin(x) cos(x) 0) ( 0 0 1)
def rotation3Dz(theta):
    """Rotation matrix about the z-axis:
        ( cos(t)  sin(t)  0)
        (-sin(t)  cos(t)  0)
        (   0       0     1)
    """
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[  c,   s, 0.0],
                     [ -s,   c, 0.0],
                     [0.0, 0.0, 1.0]])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_rotate_3d_z(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_z = -deg * pi/180\n c_z = cos(rad_z)\n s_z = sin(rad_z)\n return np.matrix([[c_z, -s_z, 0], [s_z, c_z, 0], [0, 0, 1]])", "def rotation3D_z(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle...
[ "0.8186974", "0.802003", "0.7951418", "0.76159006", "0.75433534", "0.74739504", "0.7473712", "0.7445926", "0.7439543", "0.74234265", "0.736115", "0.7298137", "0.7291585", "0.72050965", "0.7194949", "0.7179259", "0.71736616", "0.71396255", "0.71220154", "0.71090776", "0.709833...
0.84704345
0
Return angle between two vectors in R^3, in radians
def angle(x, y, deg=False):
    """Angle between two vectors in R^3.

    Returns radians by default; pass deg=True for degrees.
    """
    cos_ang = np.dot(x, y) / (norm(x) * norm(y))
    result = np.arccos(cos_ang)
    return result * (180.0 / np.pi) if deg else result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def angle(v1: Vector, v2: Vector) -> float:\n return math.degrees(math.acos((v1 * v2) / (v1.length() * v2.length())))", "def vec_angle_rad(v1,v2):\r\n \r\n c = np.dot(v1,v2)/(vector_len(v2)* vector_len(v2))\r\n return math.acos(c)", "def angle_between_vectors(a, b):\n return math.acos(dot_produc...
[ "0.84158295", "0.8327667", "0.82317805", "0.816077", "0.8124094", "0.8080465", "0.8073348", "0.80499214", "0.791963", "0.7917364", "0.78978354", "0.78865665", "0.7873176", "0.7855662", "0.7855398", "0.7853263", "0.7845532", "0.7814118", "0.7813398", "0.778545", "0.7780554", ...
0.0
-1
Compute the geodesic distance on the sphere for two points. The points are assumed to lie on the surface of the same sphere.
def spherical_distances(x, y):
    """Geodesic (great-circle) distances between rows of x and rows of y.

    All points are assumed to lie on the surface of the same sphere; the
    norms are NOT checked -- that is the caller's responsibility.
    """
    # Row norms of both point sets.
    xn = np.sqrt((x ** 2).sum(axis=1))
    yn = np.sqrt((y ** 2).sum(axis=1))
    cos_ang = np.dot(x, y.T) / np.outer(xn, yn)
    # Round-off can push cosines just outside [-1, 1], which would make
    # arccos return NaN -- clip defensively.
    cos_ang = np.clip(cos_ang, -1, 1)
    return xn[:, None] * np.arccos(cos_ang)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distance_sphere(self, other):\n if not self.crs == getattr(other, \"crs\", \"EPSG:4326\") == \"EPSG:4326\":\n raise ValueError(\"Only can calculate spherical distance with 'EPSG:4326' crs.\")\n return _binary_op(arctern.ST_DistanceSphere, self, other)", "def distance_on_sphere(lat1, ...
[ "0.7638325", "0.70502967", "0.6934199", "0.6927222", "0.6903243", "0.6792316", "0.6787839", "0.6785977", "0.6757207", "0.6749283", "0.6746814", "0.6737798", "0.6729415", "0.6711453", "0.66890496", "0.66774637", "0.664188", "0.6629779", "0.6612115", "0.66114235", "0.66067094",...
0.7146757
1
Estimate the bandwith ie the radius to use with an RBF kernel in the MeanShift algorithm
def estimate_bandwidth(X, quantile=0.3):
    """Estimate the RBF/flat-kernel radius for MeanShift.

    Takes the given quantile of the nonzero pairwise spherical distances.

    Parameters
    ----------
    X : (n, d) ndarray of points on a sphere.
    quantile : float in (0, 1), position in the sorted distance list.

    Returns
    -------
    float bandwidth estimate.
    """
    distances = spherical_distances(X, X)
    # Keep each pair once (strict upper triangle).
    distances = np.triu(distances, 1)
    distances_sorted = np.sort(distances[distances > 0])
    # np.floor returns a float; indexing with it raises on Python 3 /
    # modern NumPy, so cast the quantile position to int explicitly.
    idx = int(np.floor(quantile * len(distances_sorted)))
    bandwidth = distances_sorted[idx]
    return bandwidth
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _kernel(self, bw, X, x):\n return (1.0 / np.sqrt(2 * np.pi) / bw) * np.exp(\n -((X - x) ** 2) / (bw ** 2 * 2.0)\n )", "def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins", "def get_bollinger_bands(rm, rstd):\n \n upper_band=rm+2*rstd\n low...
[ "0.6161731", "0.61592114", "0.6056328", "0.59708244", "0.5914149", "0.5861118", "0.58004457", "0.5719034", "0.57188016", "0.57011205", "0.5696227", "0.5671052", "0.56710136", "0.5659563", "0.56463724", "0.56430066", "0.5637317", "0.5626428", "0.56258416", "0.5621374", "0.5596...
0.0
-1
Perform MeanShift Clustering of data using a flat kernel
def mean_shift(X, bandwidth=None):
    """Flat-kernel MeanShift clustering.

    Parameters
    ----------
    X : (n_points, n_features) ndarray of observations.
    bandwidth : float, optional
        Kernel radius; estimated from the data when None.

    Returns
    -------
    cluster_centers : list of (n_features,) cluster-center arrays.
    labels : ndarray assigning each point to the cluster with most votes.
    """
    if bandwidth is None:
        bandwidth = estimate_bandwidth(X)

    n_points, n_features = X.shape
    n_clusters = 0
    bandwidth_squared = bandwidth**2
    points_idx_init = np.arange(n_points)
    stop_thresh = 1e-3*bandwidth  # when mean has converged

    cluster_centers = []  # center of clusters

    # track if a points been seen already
    # NOTE: np.bool was removed in NumPy 1.24 -- use the builtin bool dtype.
    been_visited_flag = np.zeros(n_points, dtype=bool)

    # number of points to possibly use as initilization points
    n_points_init = n_points

    # used to resolve conflicts on cluster membership
    cluster_votes = []

    random_state = np.random.RandomState(0)

    while n_points_init:
        # pick a random seed point
        tmp_index = random_state.randint(n_points_init)

        # use this point as start of mean
        start_idx = points_idx_init[tmp_index]
        my_mean = X[start_idx, :]  # intilize mean to this points location

        # points that will get added to this cluster
        my_members = np.zeros(n_points, dtype=bool)

        # used to resolve conflicts on cluster membership
        this_cluster_votes = np.zeros(n_points, dtype=np.uint16)

        while True:  # loop until convergence
            # dist squared from mean to all points still active
            # FIXME - this needs to be converted to spherical distances.
            sqrt_dist_to_all = np.sum((my_mean - X)**2, axis=1)

            # points within bandwidth
            in_idx = sqrt_dist_to_all < bandwidth_squared

            # add a vote for all the in points belonging to this cluster
            this_cluster_votes[in_idx] += 1

            my_old_mean = my_mean  # save the old mean
            my_mean = np.mean(X[in_idx, :], axis=0)  # compute the new mean

            # add any point within bandwidth to the cluster
            my_members = np.logical_or(my_members, in_idx)

            # mark that these points have been visited
            been_visited_flag[my_members] = True

            if np.linalg.norm(my_mean-my_old_mean) < stop_thresh:
                # check for merge possibilities
                merge_with = -1
                for c in range(n_clusters):
                    # distance from possible new clust max to old clust max
                    dist_to_other = np.linalg.norm(my_mean - cluster_centers[c])

                    # if its within bandwidth/2 merge new and old
                    if dist_to_other < bandwidth/2:
                        merge_with = c
                        break

                if merge_with >= 0:  # something to merge
                    # record the max as the mean of the two merged
                    # (I know biased twoards new ones)
                    cluster_centers[merge_with] = 0.5 * (my_mean+ cluster_centers[merge_with])

                    # add these votes to the merged cluster
                    cluster_votes[merge_with] += this_cluster_votes
                else:  # its a new cluster
                    n_clusters += 1  # increment clusters
                    cluster_centers.append(my_mean)  # record the mean
                    cluster_votes.append(this_cluster_votes)

                break

        # we can initialize with any of the points not yet visited
        points_idx_init = np.where(been_visited_flag == False)[0]
        n_points_init = points_idx_init.size  # number of active points in set

    # a point belongs to the cluster with the most votes
    labels = np.argmax(cluster_votes, axis=0)

    return cluster_centers, labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cluster_meanshift(self, data, bandwidth=None, bin_seeding=False):\n if bandwidth is None:\n bandwidth = cl.estimate_bandwidth(data)\n\n ms = cl.MeanShift(bandwidth=bandwidth, bin_seeding=bin_seeding)\n ms.fit(data)\n\n labels = ms.labels_\n\n return labels, [np.nan...
[ "0.6720604", "0.6584007", "0.6550382", "0.64145166", "0.6413286", "0.627287", "0.6238827", "0.61965424", "0.61758494", "0.6154797", "0.6143025", "0.6117063", "0.6048166", "0.6015218", "0.6009818", "0.6004305", "0.5915785", "0.59050244", "0.5881358", "0.58780664", "0.5875312",...
0.62292767
7
References 'Distributing many points on a sphere' by E.B. Saff and A.B.J. Kuijlaars, Mathematical Intelligencer, 19.1 (1997), pp. 511
def saff_kuijlaars(N):
    """Generate N spiral points on the unit sphere.

    References
    ----------
    E.B. Saff and A.B.J. Kuijlaars, "Distributing many points on a sphere",
    Mathematical Intelligencer, 19.1 (1997).
    """
    h = -1 + 2.0 * np.arange(N) / (N - 1)
    theta = np.arccos(h)
    phi = np.zeros_like(h)
    two_pi = 2.0 * np.pi
    # Recursively advance the azimuth; endpoints keep phi = 0.
    for k in range(1, N - 1):
        phi[k] = (phi[k - 1] + 3.6 / np.sqrt(N * (1 - h[k] ** 2))) % two_pi
    return sph2car(np.ones_like(theta), theta, phi)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sphere(indiv):\n return sum([ x ** 2 for x in indiv])", "def sphere(self, x):\r\n # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]\r\n return sum((x+0)**2)", "def partsphere(self, x):\r\n self.counter += 1\r\n # return np.random.rand(1)[0]**0 * sum(x**2...
[ "0.7038946", "0.6950157", "0.69140077", "0.68093324", "0.66389656", "0.6611303", "0.6572137", "0.648919", "0.6481571", "0.6474984", "0.64451575", "0.64319617", "0.6428179", "0.64160025", "0.640967", "0.6393354", "0.6356308", "0.633187", "0.62893623", "0.62585074", "0.6178017"...
0.0
-1
Convert spherical coordinates to Cartesian coordinates.
def sph2car(r, theta, phi):
    """Convert spherical coordinates (r, theta, phi) to Cartesian (x, y, z).

    theta is the polar angle from the z-axis, phi the azimuth.
    """
    sin_theta = np.sin(theta)
    x = r * sin_theta * np.cos(phi)
    y = r * sin_theta * np.sin(phi)
    z = r * np.cos(theta)
    return x, y, z
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SphericalToCartesian(Spherical):\n\n # r,theta,phi -> x,y,z\n r = Spherical[:,0]\n st = np.sin(Spherical[:,1])\n sp = np.sin(Spherical[:,2])\n ct = np.cos(Spherical[:,1])\n cp = np.cos(Spherical[:,2])\n x = r*st*cp\n y = r*st*sp\n z = r*ct\n\n if (len(Spherical[0,:])==3):\n ...
[ "0.79381543", "0.77366406", "0.77325046", "0.76741", "0.7667967", "0.76263297", "0.7475714", "0.7456057", "0.7433543", "0.7418165", "0.7412523", "0.736814", "0.7343558", "0.73400825", "0.73142827", "0.7298736", "0.7206924", "0.7128369", "0.707744", "0.6962617", "0.6935745", ...
0.5963385
91
Sample the pODF using a rejection technique.
def sample_pODF(nsamples, qpoints, coefs, N):
    """Draw `nsamples` directions from the pODF by rejection sampling.

    Returns an (nsamples, 4) array: columns 0-2 are the unit direction,
    column 3 the accepted pODF value scaled by the rejection bound.
    """
    samples = np.zeros((nsamples, 4))

    # Upper bound on |pODF|: kernel maximum times the coefficient sum.
    bound = ((N + 1.0) ** 2 / (4.0 * np.pi)) * coefs.sum()

    accepted = 0
    while accepted < nsamples:
        # Uniform random direction on the sphere (same draw order as before).
        rand_phi = np.random.uniform(0.0, 2.0 * np.pi)
        rand_mu = np.random.uniform(-1.0, 1.0)
        sin_theta = np.sqrt(1.0 - rand_mu ** 2)
        dx = sin_theta * np.cos(rand_phi)
        dy = sin_theta * np.sin(rand_phi)
        dz = rand_mu

        f = np.abs(even_pODF(np.array([dx, dy, dz]), qpoints, coefs, N))

        # Uniform random used for rejection.
        rho = np.random.uniform(0.0, 1.0)
        if bound * rho < f:
            # Accept this direction.
            samples[accepted, :] = np.array([dx, dy, dz, f / bound])
            accepted += 1

    return samples
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_does_not_sample_twice_ppswor(self):\n with self.assertRaises(ValueError):\n s = private_sampling.ThresholdSample(\n 1.0, private_sampling.PpsworSamplingMethod)\n s.process(\"a\", math.log(FAILURE_PROBABILITY_INVERSE, math.e))\n s.process(\"a\", 1)", "def rejection_sample(self,...
[ "0.6341869", "0.6320421", "0.6018086", "0.5842575", "0.58349407", "0.57898366", "0.57707685", "0.5673086", "0.5649213", "0.56451184", "0.562531", "0.5600256", "0.55618894", "0.55617553", "0.5559686", "0.5514322", "0.55063957", "0.5383353", "0.5362512", "0.53615665", "0.531038...
0.577311
6
Compute a similarity matrix for a set of points. The points are assumed to lie on the surface of the same sphere.
def similarity_matrix(points, sigma):
    """Gaussian similarity matrix exp(-d^2 / (2 sigma)) built from pairwise
    spherical distances; all points are assumed on the same sphere.
    """
    dist = spherical_distances(points, points)
    return np.exp(-(dist * dist) / (2.0 * sigma))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def self_similarity_matrix(feature_vectors):\n norm_feature_vectors, mean, std = at.normalize_features([feature_vectors.T])\n norm_feature_vectors = norm_feature_vectors[0].T\n sim_matrix = 1.0 - distance.squareform(\n distance.pdist(norm_feature_vectors.T, 'cosine'))\n return sim_matrix", "de...
[ "0.66129136", "0.63335097", "0.62122184", "0.6183862", "0.60915583", "0.60669845", "0.59568816", "0.5932109", "0.5899266", "0.588513", "0.5874931", "0.5850991", "0.5815165", "0.5814901", "0.58031887", "0.5772447", "0.5766222", "0.5751658", "0.57376456", "0.5730853", "0.571602...
0.7539285
0
Compute a graph Laplacian for a set of points. The points are assumed to lie on the surface of the same sphere.
def laplacian(points, sigma):
    """Unnormalized graph Laplacian L = D - S for points on a sphere.

    S is the Gaussian similarity matrix and D its diagonal degree matrix.
    (A symmetrically-normalized variant was present but commented out in
    the original.)
    """
    S = similarity_matrix(points, sigma)
    degrees = S.sum(axis=1)
    return np.diag(degrees) - S
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def laplacian(self, point):\n n_vertices, n_faces = point.shape[-2], self.faces.shape[0]\n vertex_0, vertex_1, vertex_2 = self._vertices(point)\n len_edge_12 = gs.linalg.norm((vertex_1 - vertex_2), axis=-1)\n len_edge_02 = gs.linalg.norm((vertex_0 - vertex_2), axis=-1)\n len_edge...
[ "0.6669824", "0.62479734", "0.6143647", "0.60507256", "0.60478675", "0.5883016", "0.58383894", "0.5776216", "0.5732057", "0.56500006", "0.5618606", "0.5549746", "0.5351798", "0.5340996", "0.5318419", "0.53017426", "0.52880955", "0.5279453", "0.5237156", "0.52357775", "0.52184...
0.6151292
2
Decorator to help verify that a function was actually executed. Annotates a function with an attribute 'didrun', and only sets it to True if the function is actually called.
def checkrun(f):
    """Decorator that records whether the wrapped function was ever called.

    The wrapper carries a `didrun` attribute, initialized to False and
    flipped to True on the first invocation.
    """
    @functools.wraps(f)
    def _wrapped(*args, **kwargs):
        _wrapped.didrun = True
        return f(*args, **kwargs)

    _wrapped.didrun = False
    return _wrapped
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_called(self, func):\n self.called[func] = False\n def _check(*args, **kwargs):\n self.called[func] = True\n return func(*args, **kwargs)\n return _check", "def run_once(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if not wrapper.has_run...
[ "0.65756035", "0.6015735", "0.60046905", "0.5997352", "0.59535944", "0.5899848", "0.5813919", "0.57161885", "0.5611487", "0.55894804", "0.5569097", "0.556421", "0.55513084", "0.55510217", "0.55382", "0.5538129", "0.55316585", "0.5526899", "0.55099505", "0.5504471", "0.5504471...
0.8174126
0
Developers can define their own workflow classes in external python packages, in which case the workflowname must be specified as a fullyqualified class name.
def test_workflow_class_discovery():
    """Workflows can be specified by fully-qualified class name in external
    packages; verify that such a class is discovered, instantiated, and run.
    """
    # Point the config at a workflow class outside the built-in set.
    config = {
        "workflow-name": "tests.workflows.test_workflow.CustomWorkflow",
        "cluster-type": CLUSTER_TYPE
    }

    template_dir = tempfile.mkdtemp(suffix="test-workflow-discovery-template")
    with open(f"{template_dir}/workflow.yaml", 'w') as f:
        yaml.dump(config, f)

    _execution_dir, workflow = launch_flow(template_dir, 1)

    # The returned workflow must be the custom class, and its execute()
    # must actually have been invoked (tracked via the checkrun decorator).
    assert isinstance(workflow, CustomWorkflow)
    assert workflow.execute.didrun
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_workflow_class(workflow_name):\n try:\n module_name = \"workflow.\" + workflow_name\n importlib.import_module(module_name)\n return True\n except ImportError:\n return False", "def get_workflow_object(\n workflow_name, settings, logger, client, token, decision, max...
[ "0.7437935", "0.66066194", "0.6272784", "0.62531227", "0.5922669", "0.59157443", "0.58219445", "0.5800828", "0.57692146", "0.5733996", "0.5662112", "0.56386393", "0.55851597", "0.5581028", "0.5571103", "0.55317545", "0.55233943", "0.5494293", "0.5466347", "0.54425746", "0.543...
0.6346265
2
Users can specify environment variables in their config file which will be set in the driver and worker environments. Make sure those variables are set during the workflow, but not after.
def test_workflow_environment(): config = { "workflow-name": "workflow", "cluster-type": CLUSTER_TYPE, "environment-variables": { "FOO": "BAR", "FOO2": "BAR2" } } template_dir = tempfile.mkdtemp(suffix="test-workflow-environment-template") with open(f"{template_dir}/workflow.yaml", 'w') as f: yaml.dump(config, f) @checkrun def execute(workflow_inst): def _check(): assert os.environ['FOO'] == "BAR" assert os.environ["OMP_NUM_THREADS"] == '1' return True # driver env _check() # worker env assert all(workflow_inst.run_on_each_worker(_check).values()) os.environ['FOO'] = 'ORIGINAL_FOO' _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute) assert execute.didrun # Environment is restored after execution is finished. assert os.environ['FOO'] == 'ORIGINAL_FOO' assert 'FOO2' not in os.environ
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def env_config():\n # setup\n env = {'ELB_GCP_PROJECT': 'expected-gcp-project',\n 'ELB_GCP_REGION': 'expected-gcp-region',\n 'ELB_GCP_ZONE': 'expected-gcp-zone',\n 'ELB_BATCH_LEN': '93',\n 'ELB_CLUSTER_NAME': 'expected-cluster-name',\n 'ELB_RESULTS': 'gs://ex...
[ "0.70271873", "0.7026732", "0.69392926", "0.69331855", "0.68138427", "0.6760369", "0.6758569", "0.6714527", "0.67041445", "0.66552407", "0.65845776", "0.65845776", "0.65845776", "0.65845776", "0.65845776", "0.65845776", "0.6584399", "0.65421516", "0.6530069", "0.6528909", "0....
0.7265635
0
The config can specify a resource manager server address as "driver", which means the workflow should launch the resource manager on the scheduler machine. Make sure it launches, but is also shut down after the workflow exits.
def test_resource_manager_on_driver(): config = { "workflow-name": "workflow", "cluster-type": CLUSTER_TYPE, "resource-manager": { "server": "driver", "port": 4000, "config": { "read_reqs": 123, "read_data": 456, "write_reqs": 789, "write_data": 321 } } } template_dir = tempfile.mkdtemp(suffix="test-resource-manager-on-driver-template") with open(f"{template_dir}/workflow.yaml", 'w') as f: yaml.dump(config, f) @checkrun def execute(workflow_inst): client = ResourceManagerClient('127.0.0.1', 4000) mgr_config = client.read_config() assert mgr_config == config["resource-manager"]["config"], \ "Resource manager config does not match the one in the workflow config" _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute) assert execute.didrun # FIXME: For mysterious reasons, the check below does not work on Travis-CI. # Somehow, read_config() succeeds despite the fact that # the resource manager server was already terminated?? if os.environ.get('TRAVIS', '') == 'true': pytest.skip("Skipping resource manager shutdown check on Travis-CI") # Server should not be running any more after workflow exits. with pytest.raises(TimeoutError): client2 = ResourceManagerClient('127.0.0.1', 4000) client2.read_config()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def launch(config):\n \n launch_with_configs([config])", "def run_xenon_simple(workflow, machine, worker_config):\n scheduler = Scheduler()\n\n return scheduler.run(\n xenon_interactive_worker(machine, worker_config),\n get_workflow(workflow)\n )", "def test_set_power_schedule_for_de...
[ "0.59481937", "0.5393874", "0.5334805", "0.53214973", "0.52877617", "0.5262214", "0.52576655", "0.52325445", "0.51960254", "0.5173219", "0.51681364", "0.5132155", "0.51199645", "0.5117904", "0.5104992", "0.5102506", "0.5085037", "0.5080357", "0.50710267", "0.5063733", "0.5049...
0.67965436
0
Setup for test_worker_initialization(), below. Parameterized for the "oncepermachine' case (and its opposite).
def setup_worker_initialization_template(request): once_per_machine = request.param template_dir = tempfile.mkdtemp(suffix="test-worker-initialization") worker_script = f"{template_dir}/do-nothing.sh" with open(worker_script, 'w') as f: f.write("#!/bin/bash\n") f.write("sleep 10") os.chmod(worker_script, 0o777) config = { "workflow-name": "workflow", "cluster-type": CLUSTER_TYPE, "worker-initialization": { "script-path": "do-nothing.sh", "only-once-per-machine": once_per_machine, "script-args": ["_TEST_SCRIPT_FAKE_ARG_"], # This is just here to make it easy to identify the process "launch-delay": 0 } } with open(f"{template_dir}/workflow.yaml", 'w') as f: yaml.dump(config, f) return template_dir, config, once_per_machine
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_worker_initialization(setup_worker_initialization_template):\n template_dir, _config, once_per_machine = setup_worker_initialization_template\n \n num_workers = 2\n if once_per_machine or CLUSTER_TYPE in (\"synchronous\", \"processes\"):\n expected_script_count = 1\n else:\n e...
[ "0.7411761", "0.71275353", "0.6863804", "0.68591213", "0.68004584", "0.67204416", "0.66945666", "0.66871023", "0.66871023", "0.6637621", "0.6634074", "0.657743", "0.6510289", "0.6509755", "0.6507559", "0.6495748", "0.6495748", "0.64917386", "0.6455389", "0.64043945", "0.63973...
0.6802044
4
The config can specify a script to be run on each worker upon cluster initialization. This test verifies that it is launched and active while the workflow runs, and that it is launched on each worker, or just once per machine, depending on the config.
def test_worker_initialization(setup_worker_initialization_template): template_dir, _config, once_per_machine = setup_worker_initialization_template num_workers = 2 if once_per_machine or CLUSTER_TYPE in ("synchronous", "processes"): expected_script_count = 1 else: expected_script_count = num_workers @checkrun def execute(workflow_inst): script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_')) assert script_count > 0, f"Worker script is not running. Check logs in:\n{script_dir}" assert script_count <= expected_script_count, f"Worker script started too many times. Check logs in:\n{script_dir}" assert script_count == expected_script_count, f"Worker script not started on all workers. Check logs in:\n{script_dir}" _execution_dir, workflow_inst = launch_flow(template_dir, num_workers, _custom_execute_fn=execute) script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_')) assert script_count == 0, \ ("Worker script(s) remained running after the workflow exited."\ f"Check logs in:\n{script_dir}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cluster_jobs_script(self):\r\n\r\n qiime_config = load_qiime_config()\r\n submit_script = qiime_config['cluster_jobs_fp']\r\n\r\n if (submit_script):\r\n full_path = which(submit_script)\r\n if full_path:\r\n submit_script = full_path\r\n ...
[ "0.6706494", "0.6451912", "0.62901366", "0.6208054", "0.6111138", "0.59998226", "0.59990007", "0.59590906", "0.5952131", "0.5922806", "0.5853597", "0.579472", "0.57890517", "0.57299614", "0.5724651", "0.5720135", "0.57154197", "0.57125825", "0.5700395", "0.5678336", "0.567510...
0.70054567
0
You can provide an initialization script for each worker to call before the workflow starts. The most common usecase for such a script is to launch a local dvid server on each worker (for posting in parallel to the cloud). We provide the necessary script for local dvid workers outofthebox, in scripts/workerdvid. This test verifies that it works.
def test_worker_dvid_initialization(): repo_dir = Path(flyemflows.__file__).parent.parent template_dir = tempfile.mkdtemp(suffix="test-worker-dvid") # Copy worker script/config into the template shutil.copy(f'{repo_dir}/scripts/worker-dvid/dvid.toml', f'{template_dir}/dvid.toml') shutil.copy(f'{repo_dir}/scripts/worker-dvid/launch-worker-dvid.sh', f'{template_dir}/launch-worker-dvid.sh') config = { "workflow-name": "workflow", "cluster-type": CLUSTER_TYPE, "worker-initialization": { "script-path": "launch-worker-dvid.sh", "only-once-per-machine": True, "script-args": ["_TEST_SCRIPT_FAKE_ARG_"], # This is just here to make it easy to identify the process "launch-delay": 1.0 } } with open(f"{template_dir}/workflow.yaml", 'w') as f: yaml.dump(config, f) def is_worker_dvid_running(): return len(find_processes('_TEST_SCRIPT_FAKE_ARG_')) > 0 @checkrun def execute(workflow_inst): script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent assert is_worker_dvid_running(), f"Worker DVID is not running. Check logs in:\n{script_dir}" _execution_dir, workflow_inst = launch_flow(template_dir, 1, _custom_execute_fn=execute) script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent assert not is_worker_dvid_running(), \ ("Worker DVID remained running after the workflow exited."\ f"Check logs in:\n{script_dir}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_worker_initialization(setup_worker_initialization_template):\n template_dir, _config, once_per_machine = setup_worker_initialization_template\n \n num_workers = 2\n if once_per_machine or CLUSTER_TYPE in (\"synchronous\", \"processes\"):\n expected_script_count = 1\n else:\n e...
[ "0.7326153", "0.70049655", "0.6345922", "0.6308314", "0.6258606", "0.6253948", "0.6138363", "0.6117165", "0.61046404", "0.60405153", "0.6030067", "0.6017148", "0.6017148", "0.60138994", "0.5929832", "0.5922357", "0.5922357", "0.5841495", "0.5836372", "0.5836372", "0.5832595",...
0.7815065
0
Return the next power of 10
def nextpow10(n): if n == 0: return 0 else: return math.ceil(math.log10(abs(n)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_pow_two(n):\n i = 1\n while i < n:\n i = i << 1\n return i", "def _next_power_of_two(self, n):\n if n == 0:\n return 1\n return int(2 ** math.ceil(math.log2(n)))", "def next_power2(num):\n return 2 ** int(np.ceil(np.log2(num)))", "def next_power_2(x: int) -> int:\n r...
[ "0.7424253", "0.73961514", "0.7280163", "0.7175434", "0.713948", "0.71352744", "0.69893503", "0.6905554", "0.68357366", "0.67452294", "0.6701021", "0.6685439", "0.6657409", "0.6649841", "0.66056854", "0.658991", "0.6583599", "0.656067", "0.6541835", "0.6518142", "0.6472119", ...
0.8007733
0
Return a number that looks 'nice', with a maximum error
def magicnr(value, error): magics = [ (10 ** (nextpow10(error))), (10 ** (nextpow10(error))) / 2.0, (10 ** (nextpow10(error))) / 4.0, (10 ** (nextpow10(error))) / 10.0, (10 ** (nextpow10(error))) / 20.0, (10 ** (nextpow10(error))) / 40.0, (10 ** (nextpow10(error))) / 100.0, ] magics.sort() magics.reverse() magic = magics[-1] for n in magics: if n < abs(value): magic = n break return fround(value, magic)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_precision(err):\n return max(0, int(-math.log10(2 * err)) + 1)", "def computeGoodMax(totalTimes, noerrs):\n # Could allow a small amount of space above the top, but it's annnoying for percentages!\n # return None\n factor = 1.00\n maxReading = factor * max(\n [max([v for v in l if ...
[ "0.684441", "0.6396268", "0.620864", "0.6083352", "0.6061179", "0.6019383", "0.601921", "0.5970972", "0.5961703", "0.59382796", "0.5932441", "0.592237", "0.5899084", "0.5895729", "0.58700436", "0.58562726", "0.5854558", "0.58429986", "0.58234286", "0.5815658", "0.58116823", ...
0.66190135
1
Get the path to a CSV by name.
def _get_csv_path(name): return os.path.join(cwd, 'output/app_info', name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_path(name):\n return \"./data/%s\" % name", "def csv_dir(self):\n return op.join(self.root_dir, 'csv')", "def get_cached_csv(self, category: str) -> str:\n csv_path = f\"{self.csv_dir}/{category.lower()}.csv\"\n if path.exists(csv_path):\n return csv_path\n rai...
[ "0.82250005", "0.64448506", "0.6438859", "0.638522", "0.635775", "0.62658775", "0.6196743", "0.60490125", "0.60278124", "0.5936994", "0.59059805", "0.58804125", "0.58804125", "0.58078647", "0.5763088", "0.5756751", "0.5756751", "0.5756751", "0.5755957", "0.57534754", "0.57398...
0.7574934
1
Turns the CSV into a workable dictionary.
def _csv_to_dict(name): csv_path = _get_csv_path(name) result = [] with open(csv_path) as csvfile: reader = csv.DictReader(csvfile) for row in reader: result.append(row) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_to_dict(self):\n log = logger.configure(\"default\")\n try: \n df = pd.read_csv(self.__csv_path)\n except IOError as e:\n # file not found\n log.error('Could not import {}. Got error {}'.format(self.__csv_path, e))\n raise \n else:\n ...
[ "0.773511", "0.75966036", "0.7308401", "0.7227723", "0.7156224", "0.7100157", "0.70313346", "0.6998957", "0.69969165", "0.69475853", "0.69401574", "0.6889729", "0.6880265", "0.68693936", "0.6866036", "0.68612945", "0.68607634", "0.6772458", "0.6684127", "0.6684127", "0.660402...
0.7150343
5
Get the app's name.
def _get_app_name(app): return app[APP_NAME_KEY]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_name():\n return config.APP_NAME", "def app_name(self) -> str:\n return self._app_name", "def app_name(self):\n return self._app_name", "def app_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_name\")", "def get_app_name(self):\n return get...
[ "0.9113221", "0.88275534", "0.8791967", "0.87901825", "0.8730592", "0.8585392", "0.85836774", "0.84330225", "0.83433735", "0.83433735", "0.8274585", "0.81731343", "0.8169378", "0.8116642", "0.80174756", "0.7825664", "0.7809752", "0.7760691", "0.7760691", "0.7760691", "0.77606...
0.88942343
1
Get the contact's first name.
def _get_contact_first_name(app): name = app.get(CONTACT_NAME_KEY) if name: return ' {}'.format(name.split(' ')[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_first_name(self):\n return self._first_name", "def get_first_name(self) -> str:\n return self.first_name", "def first_name(self):\n return self._first_name", "def first_name(self):\n return self._first_name", "def first_name(self):\n return self._first_name", "d...
[ "0.86260074", "0.8617128", "0.8315871", "0.8315871", "0.8315871", "0.8312541", "0.81962097", "0.8183392", "0.8183392", "0.80284345", "0.7993891", "0.7896602", "0.7896602", "0.78449786", "0.7739931", "0.77090037", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754"...
0.8650525
0
Get the contacts email address.
def _get_contact_email(app): return app[CONTACT_EMAIL_KEY]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]", "def contact_email(self) -> str:\n return pulumi.get(self, \"contact_email\")", "def get_contact_email():\n from shotglass2.shotglass import get_site_config\n \n site_config = get_site_config()\n \n to = N...
[ "0.83159286", "0.8266356", "0.7886328", "0.7697728", "0.75914663", "0.7493357", "0.7450327", "0.73740244", "0.73260653", "0.7248775", "0.7248775", "0.7248775", "0.72211725", "0.72211725", "0.72211725", "0.72211725", "0.71998024", "0.71859884", "0.7072096", "0.70538586", "0.70...
0.7802634
3
Get the subject to send with the email.
def _get_email_subject(app_name): return '{} <==> Tote'.format(app_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subject(self):\n return self.mail.get('Subject')", "def getSubject(self):\r\n return self.msg[\"Subject\"]", "def subject(self):\n return self.properties.get(\"subject\", None)", "def get_subject(self):\n return self._subject", "def subject(self):\n return self.get(\"subj...
[ "0.85039127", "0.84494317", "0.79497236", "0.7938282", "0.7885958", "0.7774435", "0.7774435", "0.7769633", "0.77543914", "0.77393216", "0.7721028", "0.76066816", "0.74656695", "0.74626553", "0.7323837", "0.7316676", "0.7280974", "0.7280175", "0.72613007", "0.72613007", "0.712...
0.6934014
21
Get the email template name for the first contact email.
def _get_first_contact_email_template_name(app): return app[FIRST_CONTACT_EMAIL_TEMPLATE_NAME_KEY]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_template_name(self):\n template = None\n if self.template:\n template = self.template\n if not template:\n for p in self.get_ancestors(ascending=True):\n if p.template:\n template = p.template\n break\n ...
[ "0.68836665", "0.6812669", "0.66186845", "0.66120255", "0.6575655", "0.6516548", "0.64882386", "0.64112824", "0.63238996", "0.6301571", "0.6288311", "0.6213783", "0.6116907", "0.60900533", "0.6080471", "0.6079054", "0.60654145", "0.6055386", "0.60473263", "0.60457283", "0.603...
0.87440306
0
Gets the tote store url for this app.
def _get_app_tote_store_url(app): return app[APP_TOTE_STORE_URL]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNoteStoreUrl(self, authenticationToken):\r\n pass", "def getNoteStoreUrl(self, authenticationToken):\r\n self.send_getNoteStoreUrl(authenticationToken)\r\n return self.recv_getNoteStoreUrl()", "def get_store_path(cls):\n user_data_dir = cls.user_data_dir()\n store_path = os.path.j...
[ "0.6696655", "0.64120066", "0.6146364", "0.5828228", "0.57313335", "0.5705295", "0.56562585", "0.5626252", "0.5613728", "0.5612579", "0.5582851", "0.555923", "0.555923", "0.5541939", "0.5541939", "0.5510885", "0.55108297", "0.5492612", "0.5492612", "0.5468174", "0.54611695", ...
0.84141535
0
Check if we already sent the first contact email.
def _did_send_first_contact_email(app): first_contact = app[FIRST_CONTACT_EMAIL_SENT_KEY] if first_contact and first_contact.lower() == 'y': return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsfirstAddContact(self):\n if search_text(contact.get_value('accounts'), isScrollable = 0, searchFlag = TEXT_CONTAINS):\n click_in_list_by_index(0)\n return True\n else:\n return False", "def recent_email_sent(self):\n recent_contact_activity = self.activ...
[ "0.6324304", "0.6179195", "0.610429", "0.59007627", "0.58576906", "0.5768399", "0.5731466", "0.57095504", "0.5704638", "0.56970215", "0.5689795", "0.566889", "0.56208533", "0.5612742", "0.5604829", "0.5577359", "0.55763453", "0.55086017", "0.55008966", "0.5475762", "0.5462582...
0.8393486
0
Prints a summary of the results.
def _print_summary(results): if not len(results) > 0: print 'No results to show in summary.' return table = {} for res in results: for k, v in res.iteritems(): table.setdefault(k, []).append(v) print tabulate(table, headers='keys', tablefmt="simple")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printSummary(self):\n pass", "def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome == Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #er...
[ "0.82503015", "0.8232667", "0.79443955", "0.76772296", "0.7640588", "0.74202365", "0.7303483", "0.72779363", "0.7266571", "0.7234515", "0.7221001", "0.7205003", "0.7152706", "0.7136578", "0.7104214", "0.709339", "0.7073432", "0.70705724", "0.70628256", "0.70268404", "0.701721...
0.8227009
2
Sends out emails to the apps in the provided csv.
def send(app_csv='apps.csv', verbose=True, dry_run=True): results = [] app_info = _csv_to_dict(app_csv) for app in app_info: # Get all the app info needed for this request. app_name = _get_app_name(app) contact_first_name = _get_contact_first_name(app) email_address = _get_contact_email(app) app_tote_store_url = _get_app_tote_store_url(app) subject = _get_email_subject(app_name) # If we already sent the first contact email, continue. if _did_send_first_contact_email(app): result = dict( app_name=app_name, contact_first_name=contact_first_name, email_address=email_address, app_tote_store_url=app_tote_store_url, subject=subject, status='skipped', error=None, ) logger.info(result) results.append(result) continue try: # Get the appropriate template to send. email_template = _get_first_contact_email_template_name(app) template = env.get_template(email_template) # Render the template with app info. content = template.render( app_name=app_name, contact_first_name=contact_first_name, app_tote_store_url=app_tote_store_url, ) send_email(to=email_address, subject=subject, html=content, dry_run=dry_run) result = dict( app_name=app_name, contact_first_name=contact_first_name, email_address=email_address, app_tote_store_url=app_tote_store_url, subject=subject, status='success', error=None, ) except Exception as e: result = dict( app_name=app_name, contact_first_name=contact_first_name, email_address=email_address, app_tote_store_url=app_tote_store_url, subject=subject, status='failure', error=str(e), ) logger.info(result) results.append(result) # Sleep momentarily to avoid dos'ing the server. if not dry_run: time.sleep(0.1) if verbose: _print_summary(results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_email_csv(csv_input):\n # Get a pandas dataframe column with all of the relevant duns numbers\n\n df = pd.read_csv(csv_input)\n duns_numbers = df.dunsnumber.tolist()\n\n # Gets the file number for the current file by taking the max of all of the other numbers in the lists directory and add...
[ "0.5723836", "0.556769", "0.5490822", "0.54697037", "0.54161894", "0.5410227", "0.5337697", "0.53324795", "0.5231842", "0.52236265", "0.5195237", "0.5186955", "0.51598907", "0.51523805", "0.5090487", "0.508662", "0.5077356", "0.50331646", "0.50279236", "0.50225353", "0.501493...
0.7534394
0
First build the sum of divisors for all numbers and then look for nonabundants
def solver2(input_val): sum_div = [1] * (input_val + 1) for i in range(2, int(input_val ** 0.5) + 1): sum_div[i * i] += i for k in range(i + 1, input_val // i + 1): sum_div[k * i] += k + i abundants, result = set(), 0 for n in range(1, input_val + 1): if sum_div[n] > n: abundants.add(n) if not any((n - a in abundants) for a in abundants): result += n return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def non_abundant_sums():\n # the sum of divisors of every number\n divisor_sum = [0] * LIMIT\n for i in range(1, LIMIT):\n for j in range(i * 2, LIMIT, i):\n divisor_sum[j] += i\n # abundant numbers\n abundant_nums = [i for (i, x) in enumerate(divisor_sum) if x > i]\n\n expressi...
[ "0.71596134", "0.6881637", "0.6874127", "0.68740165", "0.6870829", "0.6703803", "0.66957605", "0.66898984", "0.6660096", "0.66297686", "0.6602979", "0.65995604", "0.6504165", "0.64978504", "0.6480131", "0.6476575", "0.6474925", "0.6459346", "0.64593166", "0.6448392", "0.63862...
0.687742
2
Perform a string compression on the input string.
def string_compression(input_string): compressed_string = '' char_count = 1 prev_char = '' for char in input_string: if char == prev_char: char_count += 1 else: compressed_string = compressed_string + str(char_count) + char char_count = 1 prev_char = char return compressed_string[1:] + str(char_count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compress(string):", "def compress(string):", "def compression(s):", "def compress_zlib(self, string):\n #encode the input sting\n self.string = string.encode()\n return zlib.compress(self.string)", "def compress(string):\n \n # Build the dictionary.\n dict_size = 256\n see...
[ "0.88319397", "0.88319397", "0.8700479", "0.79835236", "0.7687754", "0.7666799", "0.7238186", "0.7213857", "0.7115728", "0.7022821", "0.6945289", "0.69063985", "0.6815695", "0.66225106", "0.65985954", "0.65551895", "0.65420026", "0.649725", "0.6487858", "0.6423921", "0.641245...
0.7696003
4
writes data from instream into additional allocated clusters of given file. Metadata of this file will be stored in Metadata object
def write(self, instream: typ.BinaryIO, filepath: str, filename: str = None) -> None: if filename is not None: filename = path.basename(filename) if self.fs_type == 'FAT': allocator_metadata = self.fs.write(instream, filepath) self.metadata.add_file(filename, allocator_metadata) elif self.fs_type == 'NTFS': allocator_metadata = self.fs.write(instream, filepath) self.metadata.add_file(filename, allocator_metadata) else: raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __write(self, storage):\n\n positions = storage.get_positions()\n if len(positions) == 0: return\n\n X = storage.get_X()\n Y = storage.get_Y()\n\n if Y: assert len(positions) == len(X) == len(Y)\n else: assert len(positions) == len(X)\n\n start, end = positions[...
[ "0.56617075", "0.56513876", "0.561701", "0.55935436", "0.5525497", "0.5482356", "0.54683065", "0.54394776", "0.5432194", "0.5414132", "0.5392", "0.5390164", "0.53824586", "0.53770757", "0.5337196", "0.5310381", "0.5272025", "0.5262886", "0.5257946", "0.52343404", "0.52302665"...
0.60996723
0
writes hidden data from slackspace into stream. The examined slack space information is taken from metadata.
def read(self, outstream: typ.BinaryIO): file_metadata = self.metadata.get_file("0")['metadata'] if self.fs_type == 'FAT': allocator_metadata = FATAllocatorMeta(file_metadata) self.fs.read(outstream, allocator_metadata) elif self.fs_type == 'NTFS': allocator_metadata = NTFSAllocatorMeta(file_metadata) self.fs.read(outstream, allocator_metadata) else: raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_hidden(file_name, data):\n # For *nix add a '.' prefix.\n prefix = '.' if os.name != 'nt' else ''\n file_name = prefix + file_name\n\n # Write file.\n with open(file_name, 'w') as f:\n f.write(data)\n\n # For windows set file attribute.\n if os.name == 'nt':\n ret = cty...
[ "0.51169866", "0.50111604", "0.4976669", "0.4859597", "0.4805777", "0.4788097", "0.46907136", "0.4596308", "0.45624852", "0.4542059", "0.45340312", "0.45145792", "0.45093474", "0.45045894", "0.4497126", "0.4490438", "0.44690353", "0.44639584", "0.44553003", "0.44552484", "0.4...
0.0
-1
reads hidden data from slack into files
def read_into_file(self, outfilepath: str): if self.fs_type == 'FAT': with open(outfilepath, 'wb+') as outfile: self.read(outfile) elif self.fs_type == 'NTFS': with open(outfilepath, 'wb+') as outfile: self.read(outfile) else: raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_parsed_data():\n\n echonest_data_files = [f for f in os.listdir('.') if re.match(\"^echonest_[\\w]+.txt$\", f)]\n\n # Setting up header with user id and attributes\n header = ['user_id']\n header.extend(ATTRIBUTES)\n\n # printing header to standard out\n print \",\".join(header) \n\n # Processing ea...
[ "0.5406376", "0.5285635", "0.5277233", "0.5269591", "0.5216304", "0.52033526", "0.5198252", "0.51780796", "0.5176623", "0.516422", "0.5149858", "0.51421016", "0.5128155", "0.5119425", "0.510754", "0.5094407", "0.5070182", "0.5065792", "0.5031172", "0.50137323", "0.49910444", ...
0.0
-1
clears the slackspace of files. Information of them is stored in metadata.
def clear(self): if self.fs_type == 'FAT': for file_entry in self.metadata.get_files(): file_metadata = file_entry['metadata'] file_metadata = FATAllocatorMeta(file_metadata) self.fs.clear(file_metadata) elif self.fs_type == 'NTFS': for file_entry in self.metadata.get_files(): file_metadata = file_entry['metadata'] file_metadata = NTFSAllocatorMeta(file_metadata) self.fs.clear(file_metadata) else: raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean():\n clean_files()", "def clean_files(self):\n self.filenames.clear()", "def clear(self):\n\n Console.info(\"Cleaning sprite files...\")\n Console.indent()\n \n for dirPath, dirNames, fileNames in os.walk(self.base):\n for fileName in fileNames:\n ...
[ "0.74671215", "0.7415675", "0.6979348", "0.6971708", "0.6971565", "0.689763", "0.68770945", "0.6817018", "0.6782567", "0.67809623", "0.6693809", "0.666803", "0.666092", "0.6653605", "0.6635497", "0.66262114", "0.6615348", "0.66098607", "0.6608693", "0.65980065", "0.6585141", ...
0.74563277
1
Returns the unit vector of the vector.
def unit_vector(vector): vector = np.array(vector) if np.linalg.norm(vector) <= 0.00010: normv = 1.0 else: normv = np.linalg.norm(vector) return vector / normv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unit_vector(vector):\n #print 'unit_vector'\n #print vector\n #print type(vector)\n #npvector = np.array(vector)\n return vector / np.linalg.norm(vector)", "def _get_unit_vector(self, v):\n return v / np.linalg.norm(v)", "def get_unit_vector(self, vector):\n return vector / la....
[ "0.8355314", "0.8344175", "0.83402044", "0.83041203", "0.8288785", "0.82710177", "0.8263843", "0.8237869", "0.8219826", "0.8203201", "0.8180198", "0.8180198", "0.8180198", "0.8157419", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", "0.8116172", ...
0.7595422
37
ClairpbVulnerability a model defined in OpenAPI
def __init__(self, name=None, namespace_name=None, description=None, link=None, severity=None, metadata=None, fixed_by=None, affected_versions=None): # noqa: E501 # noqa: E501 self._name = None self._namespace_name = None self._description = None self._link = None self._severity = None self._metadata = None self._fixed_by = None self._affected_versions = None self.discriminator = None if name is not None: self.name = name if namespace_name is not None: self.namespace_name = namespace_name if description is not None: self.description = description if link is not None: self.link = link if severity is not None: self.severity = severity if metadata is not None: self.metadata = metadata if fixed_by is not None: self.fixed_by = fixed_by if affected_versions is not None: self.affected_versions = affected_versions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vulnerabilities(self) -> api.Vulnerabilities:\n return self._get_model(model=api.Vulnerabilities)", "def opt_model_create_rest_api():\n request_json = request.get_json()\n OptimModelRequestAPI(request_json).validate()\n return create_model_data(request_json)", "def __init__(self):\n ...
[ "0.54777217", "0.539541", "0.538349", "0.53488857", "0.53337604", "0.53252006", "0.5288814", "0.5252521", "0.5168391", "0.516711", "0.51452863", "0.51452863", "0.51452863", "0.51452863", "0.51452863", "0.5060443", "0.5005577", "0.5000991", "0.50000215", "0.49836135", "0.49804...
0.0
-1
Sets the name of this ClairpbVulnerability.
def name(self, name): self._name = name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.na...
[ "0.7476369", "0.7476369", "0.74305564", "0.74305564", "0.74305564", "0.74305564", "0.74305564", "0.7423173", "0.73924625", "0.73535186", "0.73535186", "0.73509634", "0.7336383", "0.7328558", "0.7323258", "0.73170054", "0.71923685", "0.71672845", "0.71672845", "0.71484566", "0...
0.0
-1
Sets the namespace_name of this ClairpbVulnerability.
def namespace_name(self, namespace_name): self._namespace_name = namespace_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_namespace(self, namespace: str) -> None:\n self._namespace = namespace", "def namespace(self, namespace: str):\n\n self._namespace = namespace", "def namespace(self, namespace):\n\n self._namespace = namespace", "def namespace(self, namespace):\n\n self._namespace = namesp...
[ "0.7218368", "0.6848428", "0.6652187", "0.6652187", "0.6394273", "0.61235774", "0.60623956", "0.59424466", "0.5783313", "0.57451713", "0.57072073", "0.56936276", "0.56725025", "0.5653841", "0.5579495", "0.55766636", "0.55388397", "0.5538237", "0.5520687", "0.55004734", "0.539...
0.7917395
0
Sets the description of this ClairpbVulnerability.
def description(self, description): self._description = description
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_description(self, description):\r\n self.__description = description", "def set_description(self, description):\n self.description = description", "def set_description(self, description):\n self.__description = description", "def description(self, description):\n self._des...
[ "0.72097325", "0.7180765", "0.7136288", "0.7065354", "0.7065354", "0.7065354", "0.7065354", "0.7059732", "0.70556676", "0.7041413", "0.6891786", "0.6891786", "0.6891786", "0.6891786", "0.68883944", "0.68043464", "0.670095", "0.66983426", "0.6676779", "0.6659177", "0.6595515",...
0.69659215
38
Sets the link of this ClairpbVulnerability.
def link(self, link): self._link = link
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def link(self, link):\n\n self._set_field(\"link\", link)", "def link(self, link):\n\n self.container['link'] = link", "def href(self, href):\n\n self._href = href", "def href(self, href):\n\n self._href = href", "def href(self, href):\n\n self._href = href", "def href(...
[ "0.7018405", "0.6961583", "0.6805548", "0.6805548", "0.6805548", "0.6805548", "0.6280648", "0.62231255", "0.6152556", "0.61381596", "0.6075746", "0.597047", "0.59602845", "0.5926421", "0.59064543", "0.58775926", "0.5876989", "0.5859482", "0.58415675", "0.5837184", "0.58210844...
0.71737444
6
Sets the severity of this ClairpbVulnerability.
def severity(self, severity): self._severity = severity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def severity(self, severity):\n self._severity = severity", "def severity(self, severity):\n if severity is None:\n raise ValueError(\"Invalid value for `severity`, must not be `None`\") # noqa: E501\n\n self._severity = severity", "def severity(self, severity):\n if sev...
[ "0.77666795", "0.738541", "0.73160356", "0.7266142", "0.62618464", "0.6216128", "0.59445876", "0.59445876", "0.59445876", "0.5885588", "0.5885588", "0.5885588", "0.58778495", "0.58508825", "0.58508825", "0.562879", "0.5625883", "0.55735195", "0.55235606", "0.55169725", "0.550...
0.776778
0
Sets the metadata of this ClairpbVulnerability.
def metadata(self, metadata): self._metadata = metadata
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_metadata(self, data):\r\n pass", "def set_metadata(self, attribute, value):\n self.metadata[attribute] = value", "def set_metadata(self, metadata):\n self.metadata = metadata\n return self", "def set_metadata(self, loadbalancer, metadata):\n return loadbalancer.set_...
[ "0.7049014", "0.6655038", "0.65871096", "0.63758785", "0.63146836", "0.63146836", "0.62831694", "0.6266832", "0.6266832", "0.6024687", "0.59310377", "0.59303904", "0.59018284", "0.58468807", "0.5824315", "0.58013225", "0.57796216", "0.5759306", "0.5752463", "0.57415015", "0.5...
0.6576822
7
Sets the fixed_by of this ClairpbVulnerability.
def fixed_by(self, fixed_by): self._fixed_by = fixed_by
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixed_amount(self, fixed_amount):\n\n self._fixed_amount = fixed_amount", "def fixed_amount(self, fixed_amount):\n\n self._fixed_amount = fixed_amount", "def issued_by(self, issued_by):\n\n self._issued_by = issued_by", "def mitigated_by(self, mitigated_by):\n\n self._mitigate...
[ "0.6088768", "0.6088768", "0.60354835", "0.594974", "0.56890184", "0.5542011", "0.5290293", "0.5184325", "0.5179588", "0.51558876", "0.51558876", "0.51558876", "0.51558876", "0.51558876", "0.51558876", "0.5038125", "0.4941069", "0.49050403", "0.479892", "0.47826055", "0.47764...
0.8097117
0
Sets the affected_versions of this ClairpbVulnerability.
def affected_versions(self, affected_versions): self._affected_versions = affected_versions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vulnerabilities(self, vulnerabilities):\n\n self._vulnerabilities = vulnerabilities", "def vulnerabilities(self, vulnerabilities):\n\n self._vulnerabilities = vulnerabilities", "def versions(self, versions):\n\n self._versions = versions", "def set_versions(self, consumer, versions):...
[ "0.609255", "0.609255", "0.59750617", "0.5626721", "0.54159355", "0.51944435", "0.51533484", "0.5120908", "0.50574327", "0.50420326", "0.50411284", "0.50248915", "0.48979875", "0.4828377", "0.48191965", "0.4787847", "0.46811602", "0.4634885", "0.46162087", "0.4607605", "0.460...
0.8137943
0
Returns the model properties as a dict
def to_dict(self): result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n f...
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.6900...
0.0
-1
Returns the string representation of the model
def to_str(self): return pprint.pformat(self.to_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n ...
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442...
0.0
-1
For `print` and `pprint`
def __repr__(self): return self.to_str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def print_out():\n pass", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def _printable(self):\n ...
[ "0.75577617", "0.73375154", "0.6986672", "0.698475", "0.6944995", "0.692333", "0.6899106", "0.6898902", "0.68146646", "0.6806209", "0.6753795", "0.67497987", "0.6744008", "0.6700308", "0.6691256", "0.6674591", "0.6658083", "0.66091245", "0.6606931", "0.6601862", "0.6563738", ...
0.0
-1
Returns true if both objects are equal
def __eq__(self, other): if not isinstance(other, ClairpbVulnerability): return False return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if i...
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", ...
0.0
-1
Returns true if both objects are not equal
def __ne__(self, other): return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n ...
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
Calculates the output size of the last conv layer.
def _get_conv_out(self, shape) -> int: conv_out = self.conv(torch.zeros(1, *shape)) return int(np.prod(conv_out.size()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_size(self) -> int:\n return self.output_dim", "def get_output_shape(self):\n weights = self.W.get_shape().as_list()\n input_size = np.asarray(self.incoming_shape[-3:-1])\n strides = np.asarray(self.strides[-3:-1])\n kernels = np.asarray(weights[0:2])\n num_out...
[ "0.72758055", "0.7067759", "0.70510364", "0.7000888", "0.68895096", "0.6863038", "0.6814944", "0.68022835", "0.67512447", "0.67106795", "0.6696877", "0.6696877", "0.66832334", "0.66832334", "0.6663041", "0.6635598", "0.6611119", "0.6571467", "0.6549242", "0.65476173", "0.6491...
0.6925207
4
Forward pass through network.
def forward(self, input_x) -> Tensor: conv_out = self.conv(input_x).view(input_x.size()[0], -1) return self.head(conv_out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self):\n pass", "def forward(self):\n pass", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward_pass(self):", "def...
[ "0.7486405", "0.7486405", "0.72931826", "0.72931826", "0.72931826", "0.72568643", "0.71754724", "0.70931304", "0.70689535", "0.7054133", "0.69913656", "0.6969786", "0.69356275", "0.69356275", "0.69356275", "0.6921335", "0.6920985", "0.6747466", "0.6711534", "0.67010707", "0.6...
0.0
-1
Forward pass through network.
def forward(self, input_x): return self.net(input_x.float())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self):\n pass", "def forward(self):\n pass", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward_pass(self):", "def...
[ "0.7486405", "0.7486405", "0.72931826", "0.72931826", "0.72931826", "0.72568643", "0.71754724", "0.70931304", "0.70689535", "0.7054133", "0.69913656", "0.6969786", "0.69356275", "0.69356275", "0.69356275", "0.6921335", "0.6920985", "0.6747466", "0.6711534", "0.67010707", "0.6...
0.0
-1
Forward pass through network. Calculates the action distribution.
def forward(self, x: FloatTensor) -> TanhMultivariateNormal: x = self.shared_net(x.float()) batch_mean = self.mean_layer(x) logstd = torch.clamp(self.logstd_layer(x), -20, 2) batch_scale_tril = torch.diag_embed(torch.exp(logstd)) return TanhMultivariateNormal( action_bias=self.action_bias, action_scale=self.action_scale, loc=batch_mean, scale_tril=batch_scale_tril )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, x):\n x = self.fc0(x.view(-1, x.size(-1))).view(x.size(0), x.size(1), -1)\n x = self.pe(x)\n\n x = self.inner_layers(x) # FF, FF, FF, finalFF\n\n state_value = self.fc_s(x) # double-dqn : state\n\n advantage_values = self.fc_a(x) # double-dqn : advantage\n ...
[ "0.6780671", "0.6720904", "0.66573745", "0.65337664", "0.6500491", "0.6433926", "0.6398824", "0.63217956", "0.6288894", "0.62833714", "0.62578505", "0.62466264", "0.6242794", "0.62398285", "0.6232702", "0.6224309", "0.6224309", "0.6190171", "0.61818534", "0.61756337", "0.6167...
0.0
-1
Get the action greedily (without sampling)
def get_action(self, x: FloatTensor) -> Tensor: x = self.shared_net(x.float()) batch_mean = self.mean_layer(x) return self.action_scale * torch.tanh(batch_mean) + self.action_bias
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_action(self, state):\n return self.env.action_space.sample()", "def choose_random_action(env):\n return env.action_space.sample()", "def select_action(self, state):\n\t\treturn sample(range(0, self.action_space), 1)[0]", "def get_action(self, s, eval=False):\n if eval:\n w...
[ "0.7114393", "0.70404077", "0.70395607", "0.6984486", "0.6971175", "0.6954252", "0.6913596", "0.6889728", "0.68695825", "0.6852474", "0.68420124", "0.6805876", "0.6799169", "0.67951995", "0.6754626", "0.6733447", "0.6733447", "0.6664213", "0.664351", "0.663247", "0.66250086",...
0.0
-1
Forward pass through network. Calculates the action logits and the value.
def forward(self, x) -> Tuple[Tensor, Tensor]: x = F.relu(self.fc1(x.float())) a = F.log_softmax(self.actor_head(x), dim=-1) c = self.critic_head(x) return a, c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, x):\n # action\n act = self.act_fc1(x)\n act = torch.tanh(act)\n act = self.act_fc2(act)\n act = torch.tanh(act)\n mean = self.mu(act) # N, num_actions\n logstd = self.logstd.expand_as(mean)\n std = torch.exp(logstd)\n action = torch...
[ "0.7648056", "0.7441679", "0.7235342", "0.7074458", "0.7050156", "0.69769365", "0.6912717", "0.6907993", "0.69072765", "0.6904189", "0.6899755", "0.6880525", "0.6867396", "0.6824801", "0.6819598", "0.67802507", "0.67745304", "0.6761813", "0.6747654", "0.6747654", "0.67306876"...
0.6286952
67
Forward pass through network. Calculates the Q using the value and advantage.
def forward(self, input_x): adv, val = self.adv_val(input_x) return val + (adv - adv.mean(dim=1, keepdim=True))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, state):\n x = state\n feature = self.feature_layer(x)\n action_value = self.value_layer(feature)\n advantage = self.advantage_layer(feature)\n \n q_value = action_value + (advantage - advantage.mean(dim=1, keepdim=True))\n return q_value", "def a...
[ "0.67463326", "0.6215198", "0.6126189", "0.60463625", "0.6016947", "0.60107875", "0.600079", "0.598151", "0.5964633", "0.595709", "0.5950874", "0.59488696", "0.5943859", "0.5934017", "0.59093964", "0.5896094", "0.58890605", "0.5857793", "0.58310497", "0.58174837", "0.579977",...
0.5647406
42
Gets the advantage and value by passing out of the base network through the value and advantage heads.
def adv_val(self, input_x) -> Tuple[Tensor, Tensor]: float_x = input_x.float() base_out = self.net(float_x) return self.fc_adv(base_out), self.fc_val(base_out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bias(self):", "def __get_net_probs(self):\n return np.array([node.value for node in self.net]).reshape(5,5)", "def net_output(self):\n result = self.gives()\n for k, v in self.needs().items():\n result[k] = result.get(k, 0) - v\n\n return result", "def forward(self,...
[ "0.5776241", "0.5750878", "0.5623462", "0.54842955", "0.5386926", "0.52231693", "0.52097505", "0.5197751", "0.5113644", "0.5086685", "0.5073465", "0.50708735", "0.5056254", "0.5033763", "0.5021825", "0.5015476", "0.50106674", "0.5005514", "0.4995753", "0.49826834", "0.4966393...
0.5109669
9