query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Filled name (if applicable).
def nn_filled(self): if self.parent in filled_variables: return f"{self._nn_fill_root} {self.shift}M" return self._nn_fill_root
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self, name):\n pass", "def name():\n pass", "def name():\n pass", "def getName(self):\n return \"\"", "def name_field(self):\r\n return 'name'", "def test_name_empty_string(self):\r\n self.name = \"\"", "def name(self) -> str: # pragma: no cover",...
[ "0.69951504", "0.6751056", "0.6751056", "0.67024434", "0.6659543", "0.6640836", "0.65940964", "0.65553534", "0.65237314", "0.6510735", "0.64991957", "0.64991957", "0.64991957", "0.64991957", "0.64991957", "0.647957", "0.647957", "0.647957", "0.647957", "0.6468007", "0.6453826...
0.0
-1
Sort variables based on their rank and shift. Note that this relies on all variables having a unique rank.
def sort_variables(variables): return tuple(sorted(variables, key=lambda v: (v.rank, v.shift)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort(self, varnames):\n varnames = self._find_vars(varnames, unique=True, empty_ok=False)\n var_ind_list = list(map(self._varlist.index, varnames))\n new_srtlist = var_ind_list + [None]*(self._nvar - len(varnames))\n if self._srtlist == new_srtlist:\n return\n sort...
[ "0.6356814", "0.59954023", "0.5840146", "0.57373804", "0.5699405", "0.56901157", "0.5626263", "0.5604119", "0.55457306", "0.55165404", "0.55016434", "0.5487663", "0.5474271", "0.5438897", "0.5397592", "0.5390707", "0.53346616", "0.5334493", "0.53060263", "0.5277838", "0.51805...
0.74932057
0
Given a set of criteria, find the matching variables(s).
def get_matching(variables, strict=True, single=True, **criteria): matching = [] for var in variables: for crit_name, crit_info in criteria.items(): if getattr(var, crit_name) == crit_info: continue else: break else: matching.append(var) if not matching and strict: raise RuntimeError("No matching variables were found.") if single: if len(matching) > 1: raise RuntimeError( f"Expected to find 1 matching variable. Found '{matching}'." ) if not matching: return () return matching[0] return tuple(matching)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_variables(md):\n\n # save as dictionaries with searchers as keys\n x_searchers = {}\n b_target = {}\n\n t_max = 0\n\n for var in md.getVars():\n my_var_name = var.varName\n my_var_value = var.x\n # print('%s %g' % (my_var_name, my_var_value))\n\n if 'x' in my_va...
[ "0.6416395", "0.6279023", "0.61213815", "0.5955996", "0.5882055", "0.58465207", "0.58342683", "0.5712536", "0.56871027", "0.56653214", "0.54515976", "0.5413046", "0.54008603", "0.5400208", "0.53811234", "0.5380468", "0.53650934", "0.53530097", "0.53530097", "0.5344481", "0.53...
0.7268741
0
Match variable to VariableFactory using rank, name, and units.
def match_factory(variable, factories): if not isinstance(factories, tuple): factories = (factories,) for factory in factories: if ( variable.rank == factory.rank and variable.name == factory.name and variable.units == factory.units ): return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def variable_factory(p, variable_name):\n if isinstance(variable_name, (Variable,)):\n return variable_name\n if not hasattr(p, \"variable_mapping\"):\n setattr(p, \"variable_mapping\", {})\n if variable_name not in p.variable_mapping:\n p.variable_mapping[variable_name] = Variable(va...
[ "0.6039259", "0.5748577", "0.5715186", "0.56820124", "0.56072986", "0.5535535", "0.55185264", "0.53700167", "0.5367385", "0.536188", "0.53586817", "0.53523105", "0.5274955", "0.5252807", "0.52485764", "0.5248227", "0.5243896", "0.51856565", "0.5179017", "0.517816", "0.5174382...
0.6425409
0
Get the lags for a given VariableFactory.
def get_variable_lags(var_factory): if var_factory in shifted_variables: return lags return (0,)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_shifted_variables(var_factory):\n shifted = []\n for lag in get_variable_lags(var_factory):\n shifted.append(var_factory[lag])\n return tuple(shifted)", "def lag(self):\n self._assert_counted_at_lag()\n return self._lag", "def create_predictors(y): # pragma: no cover\n ...
[ "0.67575127", "0.5803747", "0.5776367", "0.54579306", "0.54395133", "0.5426974", "0.53509307", "0.53438425", "0.5341233", "0.52827334", "0.5247959", "0.5246997", "0.5176305", "0.50930464", "0.5059406", "0.50513554", "0.5010745", "0.4938749", "0.4918975", "0.49158582", "0.4873...
0.87622905
0
Get all possible shifted variables given a VariableFactory.
def get_shifted_variables(var_factory): shifted = [] for lag in get_variable_lags(var_factory): shifted.append(var_factory[lag]) return tuple(shifted)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_variable_lags(var_factory):\n if var_factory in shifted_variables:\n return lags\n return (0,)", "def vars(self):\n return [Var(i,self.dims[i]) for i in range(self.nvar)] # TODO: use stored state info (=1 sometimes)", "def get_all_variables(self):\n out = []\n for i in se...
[ "0.69359124", "0.5861063", "0.5553798", "0.5553798", "0.5553798", "0.5403311", "0.53172415", "0.5283232", "0.5274266", "0.5179303", "0.5173764", "0.51522267", "0.51142913", "0.50858605", "0.5057335", "0.5056801", "0.5049597", "0.50440574", "0.49971545", "0.49796465", "0.49775...
0.8306546
0
Determina el vertice con mayor grado que no este coloreado ni pertenezca la lista de tabues.
def _max_vdegree_with_sdegree(self, semi_coloring, tabu_list=[]): best_v = None for v in self.vertices(): # comprobamos que el vertice no este coloreado y no pertenezca # a tabu_list if not semi_coloring[v] and not v in tabu_list: if not best_v: best_v = v if self._vertex_degree[v] > self._vertex_degree[best_v]: best_v = v return best_v
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vitoria_1(tab,jog):\r\n for i in range(1,4):\r\n win = [(0,jog,jog), (jog,0,jog), (jog,jog,0)]\r\n coluna = obter_coluna(tab, i)\r\n linha = obter_linha(tab, i) \r\n if coluna in win:\r\n return i+3*win.index(coluna)\r\...
[ "0.57876396", "0.56284934", "0.5594405", "0.5568034", "0.55080384", "0.5484388", "0.54362154", "0.5413025", "0.5407047", "0.5402674", "0.53891736", "0.5375746", "0.53344095", "0.53134215", "0.5284173", "0.5273966", "0.5259709", "0.52496856", "0.5219884", "0.52110505", "0.5200...
0.5004707
42
Determina el minimo umbral kcromatico de TSC usando un metodo de coloracion secuencial que toma dos vertices en cada paso.
def ThresholdSpectrumColoring(self, k): # inicializamos una coloracion y limpiamos la memoria semi_coloring = self._new_coloring() # reducimos el numero de colores del espectro a k spectrum = self._spectrum[:k] # vertices = self.vertices() n_colored = 0 n_vertices = len(self.vertices()) while n_colored < n_vertices: # tomamos el vertices con mayor grado entre los no visitados vertex = self._max_vdegree_with_sdegree(semi_coloring) self._vertex_order.append(vertex) # tomamos el color con la menor potencial interferencia para el vertice color = self._min_semi_interference(vertex, semi_coloring, spectrum) # asignamos el color al vertice semi_coloring[vertex] = color n_colored+=1 # actualizamos los valores de las memorias self._update_values(vertex, color, semi_coloring) # tomamos el vertice de mayor grado no adyacente al que seleccionamos # anteriormente, y en caso de que exista, repetimos el mismo proceso fneighbour = self._max_vdegree_with_sdegree(semi_coloring, self._graph.neighbours(vertex)) if fneighbour is not None: self._vertex_order.append(fneighbour) color = self._min_semi_interference(fneighbour, semi_coloring, spectrum) semi_coloring[fneighbour] = color n_colored+=1 self._update_values(fneighbour, color, semi_coloring) return self.threshold(semi_coloring), semi_coloring
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tabu_precoloring(*args):\r\n # get arguments\r\n G = args[0]\r\n n = G.nodes()\r\n m = G.arcs()\r\n \r\n # check if it a valid Graph\r\n if not G.is_correct_type('u'):\r\n print \"ERROR: the graph is not in one of the valid formats for tabu_coloring()\"\r\n return [], []\r\n ...
[ "0.5584136", "0.55213815", "0.551191", "0.549267", "0.546483", "0.5403701", "0.53459257", "0.5344636", "0.5285508", "0.52787477", "0.5213398", "0.5209369", "0.5208083", "0.5198757", "0.5198398", "0.5157316", "0.5147865", "0.5131297", "0.5127294", "0.51265067", "0.5125503", ...
0.53559506
6
Returns the path to our major ldso symlink. (Which allows us to change which ldso we are actively using without patching a bunch of binaries)
def ld_linux_path(root): return os.path.join(root, 'lib', 'ld-linux-xpkg.so')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _find_ld_version():\n if sys.platform == 'darwin':\n return _find_exe_version('ld -v', _MAC_OS_X_LD_VERSION)\n else:\n return _find_exe_version('ld -v')", "def get_linked_libpython():\n if is_windows():\n return\n libdl = ctypes.CDLL(ctypes.util.find_library(\"dl\"))\n lib...
[ "0.66215503", "0.6279569", "0.5894043", "0.5856724", "0.5856588", "0.5700655", "0.56694955", "0.55614096", "0.5547654", "0.55200994", "0.5495592", "0.54438263", "0.5425426", "0.5324425", "0.5300233", "0.5298378", "0.52957606", "0.5281993", "0.52785194", "0.5256666", "0.525556...
0.6632493
0
If the x,y equal, we assume that the both Nodes are same
def equal(self,other): if(self.x == other.x) and (self.y == other.y): return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_same_as(self, other) -> bool:\n return self.x == other.x and self.y == other.y", "def compareNodes(x, y):\n return x.pathValue - y.pathValue", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __eq__(self, second):\r\n\t\treturn self.x == other.x and se...
[ "0.7050482", "0.70344174", "0.69647264", "0.693082", "0.69281536", "0.68705666", "0.68475515", "0.68315655", "0.67769325", "0.67707807", "0.67489284", "0.67242", "0.67242", "0.6677688", "0.6663041", "0.6656221", "0.6656221", "0.6656221", "0.6656221", "0.6656221", "0.66506815"...
0.69610524
3
For each node, the cost of getting from the start node to that node.
def create_gScore(width,height): gScore = [[np.inf for i in range(width)] for i in range(height)] return gScore
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cost(self):\n node, path_back = self, []\n cost = 0\n while node:\n path_back.append(node)\n if node.action is not None:\n cost = cost + node.action.cost\n node = node.parent\n # remove one due to root empty node \n #cost = c...
[ "0.7186682", "0.70747066", "0.676554", "0.65974796", "0.6534421", "0.6378079", "0.6378079", "0.631064", "0.6299445", "0.6267691", "0.6214393", "0.6190077", "0.6176873", "0.61578125", "0.61578125", "0.61578125", "0.6116098", "0.60726243", "0.6060423", "0.60556203", "0.6041697"...
0.0
-1
For each node, the cost of getting from the start node to that node.
def create_fScore(width,height): fScore = [[np.inf for i in range(width)] for i in range(height)] return fScore
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cost(self):\n node, path_back = self, []\n cost = 0\n while node:\n path_back.append(node)\n if node.action is not None:\n cost = cost + node.action.cost\n node = node.parent\n # remove one due to root empty node \n #cost = c...
[ "0.7186859", "0.70753443", "0.6766204", "0.6596982", "0.65339065", "0.63787216", "0.63787216", "0.63113326", "0.63002783", "0.6268371", "0.6213697", "0.6189538", "0.6177489", "0.6158397", "0.6158397", "0.6158397", "0.611681", "0.6072511", "0.60598373", "0.6056283", "0.6042592...
0.0
-1
Diagonal distance h_diagonal(n) = min(abs(n.x goal.x), abs(n.y goal.y)) h_straight(n) = (abs(n.x goal.x) + abs(n.y goal.y)) h(n) = D_diagnoal h_diagonal(n) + D_straight (h_straight(n) 2h_diagonal(n)))
def heuristic_cost_estimate(start, goal,d_diagnoal,d_straight): start_x = start.x start_y = start.y goal_x = goal.x goal_y = goal.y h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y)) h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y) h = d_diagnoal * h_diagonal + d_straight * (h_straight - 2 * h_diagonal) return h
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dist_between(current, neighbor,d_diagnoal,d_straight):\n start_x = current.x\n start_y = current.y\n goal_x = neighbor.x\n goal_y = neighbor.y\n\n h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))\n h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)\n h = d_...
[ "0.8049952", "0.6934964", "0.69334024", "0.6747959", "0.644159", "0.62714773", "0.6160783", "0.6144977", "0.6144977", "0.61420053", "0.6101649", "0.6096123", "0.60647726", "0.6007809", "0.59770143", "0.5892348", "0.5882317", "0.5860657", "0.5833256", "0.58259183", "0.57475424...
0.7815696
1
Diagonal distance h_diagonal(n) = min(abs(n.x goal.x), abs(n.y goal.y)) h_straight(n) = (abs(n.x goal.x) + abs(n.y goal.y)) h(n) = D_diagnoal h_diagonal(n) + D_straight (h_straight(n) 2h_diagonal(n)))
def dist_between(current, neighbor,d_diagnoal,d_straight): start_x = current.x start_y = current.y goal_x = neighbor.x goal_y = neighbor.y h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y)) h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y) h = d_diagnoal * h_diagonal + d_straight * (h_straight - 2 * h_diagonal) return h
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heuristic_cost_estimate(start, goal,d_diagnoal,d_straight):\n start_x = start.x\n start_y = start.y\n goal_x = goal.x\n goal_y = goal.y\n\n h_diagonal = min(np.abs(start_x - goal_x),np.abs(start_y - goal_y))\n h_straight = np.abs(start_x - goal_x) + np.abs(start_y - goal_y)\n h = d_diagnoa...
[ "0.7815963", "0.6935118", "0.69333106", "0.6748271", "0.6441967", "0.6272037", "0.616098", "0.614516", "0.614516", "0.6142244", "0.61005026", "0.6096975", "0.60649806", "0.60074985", "0.59759593", "0.58921903", "0.5882251", "0.58594304", "0.58325016", "0.5826039", "0.57475173...
0.80509466
0
Get neighbors of current node
def getNeighbors(current,width,height): x = current.x y = current.y neighbors = np.array([[x],[y]]) if (x - 1) >= 0: t = np.array([[x - 1], [y]]) neighbors = np.hstack((neighbors, t)) if (y - 1) >= 0: t = np.array([[x - 1], [y - 1]]) neighbors = np.hstack((neighbors,t)) if (y + 1) < width: t = np.array([[x - 1], [y + 1]]) neighbors = np.hstack((neighbors,t)) if (y - 1) >= 0: t = np.array([[x], [y - 1]]) neighbors = np.hstack((neighbors, t)) if (y + 1) < width: t = np.array([[x], [y + 1]]) neighbors = np.hstack((neighbors, t)) if (x + 1) < height: t = np.array([[x + 1], [y]]) neighbors = np.hstack((neighbors, t)) if (y - 1) >= 0: t = np.array([[x + 1], [y - 1]]) neighbors = np.hstack((neighbors,t)) if (y + 1) < width: t = np.array([[x + 1], [y + 1]]) neighbors = np.hstack((neighbors,t)) neighbors = neighbors[:,1:] return neighbors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def neighbors(self, node):\r\n return list(self.graph.neighbors(node))", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.neighbors", "def get_neighbors(self):\n return self.nei...
[ "0.82076806", "0.81595117", "0.81595117", "0.81595117", "0.81595117", "0.81556916", "0.81038094", "0.8046027", "0.80142957", "0.79389143", "0.7928019", "0.7904153", "0.78723794", "0.7864975", "0.78584856", "0.7812342", "0.78112066", "0.77656096", "0.77593744", "0.772959", "0....
0.66926175
100
Get path of A
def reconstruct_path(cameFrom, current): total_path = np.array([[current.x],[current.y]]) while current_in_cameFrom(current,cameFrom): current = current.father node_x = current.x node_y = current.y node_pos = np.array([[node_x],[node_y]]) total_path = np.hstack((total_path,node_pos)) l1 = total_path[0,:] l1 = l1[::-1] l2 = total_path[1,:] l2 = l2[::-1] total_path = np.vstack((l1,l2)) return total_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPath(obj):", "def get_path(self):\n return self.path", "def _get_as_path(self):\n return self.__as_path", "def __get_path(self):\n return self.path", "def path(self) -> Path:\n return self[0]", "def get_path(self):\n return self.path", "def __path(self):\n if se...
[ "0.7251213", "0.6919148", "0.6838147", "0.68078834", "0.6688719", "0.66773564", "0.6611869", "0.65055686", "0.6474741", "0.64676434", "0.6441411", "0.6440321", "0.6404309", "0.6396458", "0.6394175", "0.6383371", "0.63829833", "0.6375241", "0.6375075", "0.63631576", "0.6362070...
0.0
-1
convert the path in real to grid, e.g. 21 > 2.15 sx= ix reso + reso/2
def convertGridPathToReal(pathInGrid, sx, sy, gx, gy, grid_reso = 0.1): pathInReal = (pathInGrid * grid_reso + grid_reso / 2) stepNum = pathInReal.shape[1] # Replace head and tail pathInReal[:, 0] = [sx, sy] pathInReal[:, 0] = [sx, sy] pathInReal[:, stepNum - 1] = [gx, gy] pathInReal[:, stepNum - 1] = [gx, gy] return pathInReal
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fifteen():\r\n\r\n currentcell = 1.0\r\n cellpaths = 2.0\r\n \r\n while currentcell < 20.0:\r\n currentcell += 1.0\r\n cellpaths = cellpaths * (4.0 - 2.0/currentcell)\r\n \r\n return cellpaths", "def gen_grids(self):\n self.dx = self.grid_width / self.grid_resol\n ...
[ "0.5953606", "0.5806127", "0.57640606", "0.5755406", "0.57439196", "0.5740067", "0.5653222", "0.5591101", "0.55833936", "0.5580926", "0.5577852", "0.5570575", "0.5565106", "0.55632424", "0.5562481", "0.5536194", "0.55283296", "0.5520637", "0.5520081", "0.5514781", "0.55122167...
0.7544195
0
A algorithm A square 6m x 3m region
def aStar(sx = 1.55, sy = 2.05, gx = 1.55, gy = 4.05, d_diagnoal = 14, d_straight = 10, grid_reso = 0.1, grid_width = 6, grid_height = 3): width = int(grid_width/grid_reso) height = int(grid_height/grid_reso) #TODO A_sx, A_sy = realPosTogridPos(sx, sy, grid_reso = grid_reso) A_gx, A_gy = realPosTogridPos(gx, gy, grid_reso = grid_reso) startNode = Node(A_sx,A_sy,None,0,0,0) goalNode = Node(A_gx,A_gy,None,0,0,0) # The set of nodes already evaluated closedSet = set() # The set of currently discovered nodes that are not evaluated yet. openSet = set() # Initially, only the start node is known. openSet.add(startNode) # For each node, which node it can most efficiently be reached from.If a node can be reached from many nodes, cameFrom will eventually contain the most efficient previous step. cameFrom = [] # For each node, the cost of getting from the start node to that node. gScore = create_gScore(width, height) start_x = startNode.x start_y = startNode.y # The cost of going from start to start is zero. startNode.g_value = 0 gScore[start_x][start_y] = 0 # For each node, the total cost of getting from the start node to the goal by passing by that node. That value is partly known, partly heuristic. fScore = create_fScore(width, height) # For the first node, that value is completely heuristic. 
startNode.f_value = heuristic_cost_estimate(startNode, goalNode,d_diagnoal,d_straight) fScore[start_x][start_y] = heuristic_cost_estimate(startNode, goalNode,d_diagnoal,d_straight) while len(openSet) != 0: # current := the node in openSet having the lowest fScore[] value current = node_lowest_fScore(openSet) # If it is the item we want, retrace the path and return it if current.equal(goalNode): path = reconstruct_path(cameFrom, current) # path in real # print "path",path pathInReal = convertGridPathToReal(path, sx, sy, gx, gy, grid_reso = grid_reso) # path in grid return pathInReal openSet.remove(current) closedSet.add(current) current_neighbors = getNeighbors(current, width, height) current_neighbors_num = current_neighbors.shape[1] # for neighbor in current_neighbors: for index in range(current_neighbors_num): [neighbor_x,neighbor_y] = current_neighbors[:,index] neighbor = Node(neighbor_x,neighbor_y,None,np.inf,np.inf,np.inf) if neighbor_in_closedSet(neighbor,closedSet): continue if neighbor_not_in_openSet(neighbor,openSet): # Discover a new node openSet.add(neighbor) # The distance from start to a neighbor the "dist_between" function may vary as per the solution requirements. current_x = current.x current_y = current.y tentative_gScore = gScore[current_x][current_y] + dist_between(current, neighbor,d_diagnoal,d_straight) neighbor_x = neighbor.x neighbor_y = neighbor.y if tentative_gScore >= gScore[neighbor_x][neighbor_y]: continue # This is not a better path. neighbor.father = current cameFrom.append(neighbor) gScore[neighbor_x][neighbor_y] = tentative_gScore neighbor.g_value = tentative_gScore neighbor_f_value = gScore[neighbor_x][neighbor_y] + heuristic_cost_estimate(neighbor, goalNode,d_diagnoal,d_straight) fScore[neighbor_x][neighbor_y] = neighbor_f_value neighbor.f_value = neighbor_f_value return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def phantom_squares(n_points,S):\n \n #Rescaling according to image size \n S[:,0] = S[:,0]*n_points/2\n S[:,1] = S[:,1]*n_points/2\n S[:,2] = S[:,2]*n_points/2\n S[:,3] = S[:,3]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 ) \n ...
[ "0.6708951", "0.6398944", "0.62836295", "0.6229607", "0.6192689", "0.6141865", "0.6077024", "0.6006059", "0.5976849", "0.597339", "0.59075284", "0.58768487", "0.5871641", "0.5842441", "0.58300805", "0.5829569", "0.5814981", "0.5807813", "0.5806518", "0.5789432", "0.5779059", ...
0.0
-1
One dimensional power law model function
def evaluate(x, amplitude, x_0, alpha): xx = x / x_0 return amplitude * xx ** (-alpha)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __pow__(self,power):\n return Factor().__build( VarSet(self.v) , np.power(self.t,power) )", "def powerlaw(E,alpha,A):\n\n\treturn A*E**alpha", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def f_mw(f):\n re...
[ "0.6872667", "0.6868249", "0.64523995", "0.639687", "0.62560534", "0.62294555", "0.6126374", "0.6074723", "0.6066187", "0.6004509", "0.5986175", "0.5976839", "0.5966759", "0.59441537", "0.5944057", "0.5937536", "0.5917039", "0.5900731", "0.58842295", "0.5877201", "0.5864981",...
0.0
-1
One dimensional power law derivative with respect to parameters
def fit_deriv(x, amplitude, x_0, alpha): xx = x / x_0 d_amplitude = xx ** (-alpha) d_x_0 = amplitude * alpha * d_amplitude / x_0 d_alpha = -amplitude * d_amplitude * np.log(xx) return [d_amplitude, d_x_0, d_alpha]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dgdy(self, X):\n \n return 3*X[1]**2", "def dV(X):\n return -4 * a * np.power(X, 3) + 2 * b * X", "def derivatives(x_p, y_p):\r\n # set up the matrix equation\r\n n = x_p.shape[0]\r\n M = np.zeros( [n,n] )\r\n d = np.zeros( [n,1] )\r\n \r\n # fill in the constants where t...
[ "0.6734102", "0.65710866", "0.64798695", "0.6456969", "0.64338213", "0.64268124", "0.63887954", "0.63887954", "0.634643", "0.63460815", "0.6338571", "0.63372827", "0.6308936", "0.63080084", "0.6305035", "0.6289483", "0.62714446", "0.62588847", "0.62584984", "0.6250596", "0.62...
0.6106129
44
One dimensional broken power law model function
def evaluate(x, amplitude, x_break, alpha_1, alpha_2): alpha = np.where(x < x_break, alpha_1, alpha_2) xx = x / x_break return amplitude * xx ** (-alpha)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def powerlaw(E,alpha,A):\n\n\treturn A*E**alpha", "def __pow__(self,power):\n return Factor().__build( VarSet(self.v) , np.power(self.t,power) )", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def ForceFitPowerlaw...
[ "0.6540572", "0.64533246", "0.62271285", "0.6183615", "0.6153722", "0.6143716", "0.6098002", "0.6049835", "0.6040145", "0.6030009", "0.6010443", "0.59917516", "0.5971195", "0.5968484", "0.59673804", "0.59626305", "0.5958735", "0.59169805", "0.5871685", "0.5862314", "0.5814751...
0.0
-1
One dimensional broken power law derivative with respect to parameters
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2): alpha = np.where(x < x_break, alpha_1, alpha_2) xx = x / x_break d_amplitude = xx ** (-alpha) d_x_break = amplitude * alpha * d_amplitude / x_break d_alpha = -amplitude * d_amplitude * np.log(xx) d_alpha_1 = np.where(x < x_break, d_alpha, 0) d_alpha_2 = np.where(x >= x_break, d_alpha, 0) return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dgdy(self, X):\n \n return 3*X[1]**2", "def derivativeW(self, *args):\n if self.n_dims >= 4:\n j = 0\n else:\n assert (\n False\n ), \"Derivative with respect to W can't be called when n_dims < 4!\"\n if self.i_dim == j:\n ...
[ "0.6551897", "0.646485", "0.64178663", "0.64106286", "0.6382745", "0.6354045", "0.63523704", "0.6345646", "0.6331496", "0.63267994", "0.6326617", "0.63140196", "0.63140196", "0.6313484", "0.6305103", "0.6294662", "0.6289381", "0.6286769", "0.6270342", "0.62632966", "0.6260843...
0.0
-1
One dimensional exponential cutoff power law model function
def evaluate(x, amplitude, x_0, alpha, x_cutoff): xx = x / x_0 return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def powerlaw(E,alpha,A):\n\n\treturn A*E**alpha", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def dd_xpowalpha(cls,grid,alpha,cutoff=False):\n grid.l.info('bc.hom: Setting initial data to (-x)^alpha.')\n ...
[ "0.6795391", "0.66082", "0.6514522", "0.6512951", "0.6474245", "0.63665336", "0.6313782", "0.62870765", "0.628231", "0.62377995", "0.6200624", "0.614855", "0.6140678", "0.6140613", "0.61380404", "0.61372375", "0.61360735", "0.6134497", "0.6130171", "0.6122109", "0.61198056", ...
0.6441985
5
One dimensional exponential cutoff power law derivative with respect to parameters
def fit_deriv(x, amplitude, x_0, alpha, x_cutoff): xx = x / x_0 xc = x / x_cutoff d_amplitude = xx ** (-alpha) * np.exp(-xc) d_x_0 = alpha * amplitude * d_amplitude / x_0 d_alpha = -amplitude * d_amplitude * np.log(xx) d_x_cutoff = amplitude * x * d_amplitude / x_cutoff ** 2 return [d_amplitude, d_x_0, d_alpha, d_x_cutoff]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def exponential(self, data=[], init_lambdas=[1,0.75], max_iteration=500):\r\n xaxis = np.arange(1, len(data)+1)\r\n data = np.array(data)\r\n idx = 1\r\n ...
[ "0.6435805", "0.641947", "0.63531476", "0.6331413", "0.6303507", "0.6297258", "0.62600297", "0.62595487", "0.6257403", "0.6225996", "0.619326", "0.6160021", "0.6141522", "0.61281365", "0.6099849", "0.6092616", "0.6091917", "0.6058767", "0.6058701", "0.6036378", "0.60332775", ...
0.6778236
0
One dimensional log parabola model function
def evaluate(x, amplitude, x_0, alpha, beta): xx = x / x_0 exponent = -alpha - beta * np.log(xx) return amplitude * xx ** exponent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_log_perplexity(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log2(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def _perplexity(self, X, log_w):\n return np.exp(-log_w/X.sum())", "def nlogl...
[ "0.68354905", "0.68036896", "0.67386776", "0.66810524", "0.6678433", "0.6672779", "0.6672465", "0.66694707", "0.6639666", "0.6583274", "0.6557876", "0.6509242", "0.6490352", "0.6488276", "0.645247", "0.6448294", "0.6431051", "0.6431051", "0.64194", "0.640911", "0.64002174", ...
0.0
-1
One dimensional log parabola derivative with respect to parameters
def fit_deriv(x, amplitude, x_0, alpha, beta): xx = x / x_0 log_xx = np.log(xx) exponent = -alpha - beta * log_xx d_amplitude = xx ** exponent d_beta = -amplitude * d_amplitude * log_xx ** 2 d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0) d_alpha = -amplitude * d_amplitude * log_xx return [d_amplitude, d_x_0, d_alpha, d_beta]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MvNormalLogp():\n cov = pt.matrix(\"cov\")\n cov.tag.test_value = floatX(np.eye(3))\n delta = pt.matrix(\"delta\")\n delta.tag.test_value = floatX(np.zeros((2, 3)))\n\n cholesky = Cholesky(lower=True, on_error=\"nan\")\n\n n, k = delta.shape\n n, k = f(n), f(k)\n chol_cov = cholesky(cov...
[ "0.69615203", "0.6887294", "0.6852058", "0.6715104", "0.6654081", "0.66425574", "0.66307074", "0.6591338", "0.6572366", "0.65332234", "0.65331346", "0.65201646", "0.65035284", "0.64703155", "0.6436214", "0.6433464", "0.64074075", "0.6404806", "0.6395806", "0.6379538", "0.6374...
0.5986274
84
Generate strong password to add to csv file and clipboard.
def generate_pw(): chars = string.ascii_letters + string.digits + '!@#$%^&*()' password = ''.join(random.choice(chars) for i in range(16)) pyperclip.copy(password) print('Password copied to clipboard.') return password
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def password_generate_strong(self, ctx, delimeter: str = \"\"):\n d = delimeter\n rc = random.choice\n rr = random.randint\n await ctx.send(\n d.join(rc(RANDOM_WORDS).capitalize() for i in range(3)) + f\"{d}{rr(1,1000)}\"\n )", "def giveReadablePassword():\n ...
[ "0.6699509", "0.66523397", "0.66337395", "0.65883833", "0.6544499", "0.651341", "0.64864033", "0.6481383", "0.64227664", "0.63991195", "0.63651586", "0.63424796", "0.6329188", "0.6299538", "0.62843406", "0.6273658", "0.62399065", "0.6230091", "0.6219999", "0.6194473", "0.6178...
0.7241065
0
Add new account to pw.csv and generate a strong password.
def main(script): try: # ensure user entered account name and user name account_name = sys.argv[1] user_name = sys.argv[2] except IndexError: print('python add_pw.py [account name] [user name]') else: # read in csv file pw_file = open('pw.csv') pw_object = csv.reader(pw_file) # ensure account does not already exist in pw.csv for row in pw_object: if row[0] == account_name: print('Account already exists.') break # append account name, user name, and password generated by function else: with open('pw.csv', 'a', newline='') as csvfile: writer = csv.writer(csvfile) password = generate_pw() writer.writerow([account_name, user_name, password])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_password():\n new_pass = generate_password()\n entry_pass.delete(0, END)\n entry_pass.insert(0, new_pass)", "def add_user(self, user, pw):\n self.db.execute(\"INSERT INTO user_credentials VALUES (?, ?)\", [user, pw])\n self.db.commit()", "def save_password(self, new_password):\n...
[ "0.68463534", "0.6339493", "0.6226887", "0.6119043", "0.6009385", "0.59483135", "0.58453876", "0.5821747", "0.5782089", "0.5744898", "0.5735216", "0.57338196", "0.573278", "0.57301116", "0.5707284", "0.5689022", "0.5674017", "0.5670386", "0.56640315", "0.56365216", "0.5633996...
0.6759446
1
'o' contains a single object ['b','yttywetywe']
def to_server(self, o): assert type(o) == str # add to queue self.toserverqueue.put(o, block=False) # send now, if appropriate if self.buffer_tx==False: self.periodicTimer.fireNow()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def object_here(obj=None): #py:object_here\n if obj is not None:\n ans = RUR._object_here_(obj)\n else:\n ans = RUR._object_here_()\n return list(ans) # convert from JS list-like object to proper Python list", "def iterencode(self, o):\n if self.check_circular:\n marker...
[ "0.56176996", "0.5485273", "0.5438254", "0.54378515", "0.54331356", "0.5427594", "0.5410083", "0.5350562", "0.53243625", "0.5319603", "0.5232624", "0.52030325", "0.51990336", "0.5178645", "0.51718277", "0.51699454", "0.515686", "0.51475954", "0.51456654", "0.514488", "0.51199...
0.0
-1
Send one HTTP POST request to server to (1) send the elements from toserverqueue to the server (2) receive the objects from the server and rearm.
def _poll_server(self, o=None): # send objects try: # create HTTP body body = { 'id': self.id, 'token': self.token, 'ttl': self.polling_period+3, } # objects o = [] while True: try: e = self.toserverqueue.get(block=False) except Queue.Empty: break else: o += [e] if o: body['o'] = o # send to server r = requests.post( self.server_url, json = body, ).json() r = convertToString(r) except requests.exceptions.ConnectionError as err: self._set_is_connected(False) logger.error(err) except Exception as err: self._set_is_connected(False) logger.error(err) else: self._set_is_connected(True) if r['o']: self.from_server_cb(r['o'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _client(self):\n while True:\n body = self.queue.get(True)\n print \"Sending %s bytes (%s/%s)\" % (len(body), self.queue.qsize(), self.queue.maxsize)\n\n try:\n req = urllib2.Request(self.endpoint, body)\n urllib2.urlopen(req).read()\n ...
[ "0.63026744", "0.6272743", "0.62231123", "0.61339027", "0.6106243", "0.6011678", "0.59978294", "0.5916277", "0.588042", "0.5807718", "0.5794925", "0.57784057", "0.5756853", "0.57314724", "0.570634", "0.5697248", "0.5667771", "0.56608444", "0.56563604", "0.56539094", "0.562724...
0.59547377
7
This function just responds to the browser ULR
def home(): return render_template('index.html')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def webinterface(self):\n raise cherrypy.HTTPRedirect(self.url('', True))", "def lro_handling(self) -> global___Snippet.LroResponseHandling:", "def respond(self, resp):\n self.push(resp + '\\r\\n')\n self.logline('==> %s' % resp)", "def browser_iface():\n return 'Nothing to see here'"...
[ "0.5769322", "0.576179", "0.5676339", "0.5662295", "0.5662295", "0.55862385", "0.5506965", "0.5477685", "0.54589474", "0.5399092", "0.5390287", "0.5390287", "0.5381453", "0.53710353", "0.536323", "0.5349364", "0.53294426", "0.53231996", "0.5295247", "0.5248364", "0.52426815",...
0.0
-1
Add Site Static Resource Directory
def addMobileStaticResourceDir(self, dir: str) -> None: self.__rootMobileResource.addFileSystemRoot(dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_dirs_to_static(static_webapp_name):\n static_dir = '$HOME/webapps/%s' % static_webapp_name\n with settings(warn_only=True):\n with cd(static_dir):\n run(\"mkdir static && mkdir media\")\n run(\"rm index.html\")\n run(\"touch index.html\")\n with cd(code_...
[ "0.71481174", "0.6910498", "0.66968936", "0.66562533", "0.66559666", "0.65018547", "0.64497256", "0.6352136", "0.6325666", "0.6246173", "0.6058434", "0.6023625", "0.5898131", "0.5875398", "0.58405364", "0.5814434", "0.5805225", "0.57971865", "0.5774547", "0.57634854", "0.5763...
0.7166486
0
Add Site Resource Add a cusotom implementation of a served http resource.
def addMobileResource(self, pluginSubPath: bytes, resource: BasicResource) -> None: pluginSubPath = pluginSubPath.strip(b'/') self.__rootMobileResource.putChild(pluginSubPath, resource)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_site():\n log = current_app.log\n db = request.db\n Site = db.tables.Site\n Endpoint = db.tables.Endpoint\n site_data = {}\n endpoints = []\n try:\n if not request.data:\n return \"Missing POST data\", 400\n raw_site_data...
[ "0.6253832", "0.62042636", "0.62019485", "0.59963995", "0.5816037", "0.5807638", "0.5796633", "0.5752867", "0.5738871", "0.5671778", "0.56447417", "0.5639989", "0.5624698", "0.55881715", "0.55655074", "0.5461416", "0.54530823", "0.54488623", "0.5432111", "0.53743726", "0.5350...
0.52065897
28
Site Root Resource This returns the root site resource for this plugin.
def rootMobileResource(self) -> BasicResource: return self.__rootMobileResource
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def root(self):\n return Resource()", "def get_root(self, *args, **kwargs):\n return self._resources_manager.get_root(*args, **kwargs)", "def site(self):\n if not hasattr(self, '_site'):\n urltool = getToolByName(self.req.PARENTS[0], 'portal_url', None)\n if urltool i...
[ "0.7128751", "0.6892363", "0.678429", "0.65742475", "0.6537863", "0.6498057", "0.64581287", "0.64400375", "0.6385263", "0.62711334", "0.6235817", "0.6215665", "0.6187106", "0.61689866", "0.6154504", "0.61263305", "0.6093571", "0.6084454", "0.6069759", "0.6053258", "0.6003061"...
0.67585224
3
Provides either a random DidYouKnow fact from wiki or else any number of specific DYKs.
async def dyk(self, ctx, *nums: int): nums = nums or [random.randint(0, wiki_dyk.count - 1)] em = discord.Embed(title='Did you know...\n', description='', color=0xffffff) for item in ((n-1) % wiki_dyk.count if n else wiki_dyk.count - 1 for n in nums): em.description += f'**#{item + 1}:** {wiki_dyk.trivia[item]}\n' await ctx.send(embed=em)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randomHelmet():\n return random.choice(HELMETS)", "def get_random_fact(url):\n facts = get_webpage(url)\n return facts[random.randint(0, len(facts))]", "async def dogfact(self, ctx: DogbotContext):\n await ctx.send(random.choice(self.dogfacts))", "def get_fortune():\n data_file = get_dat...
[ "0.6159103", "0.595592", "0.5825193", "0.5651778", "0.55767024", "0.55524427", "0.5543401", "0.5500759", "0.5500759", "0.5500759", "0.54952174", "0.54726946", "0.54506487", "0.5439445", "0.5428501", "0.54166263", "0.53925294", "0.53677875", "0.53525066", "0.53501534", "0.5328...
0.5818779
3
Return the default logger for the module.
def logger(): return logging.getLogger(__name__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _logger():\n return logging.getLogger(module_name)", "def get_module_logger():\n return logging.getLogger(__name__)", "def get_logger():\n return logging.getLogger(__name__)", "def getLogger():\n return GlobalLogger.logger", "def _get_logger():\n return logging.Logger(__name__)",...
[ "0.8332255", "0.8202088", "0.8047881", "0.7980913", "0.7945653", "0.78576833", "0.7827138", "0.7816803", "0.77877396", "0.7782513", "0.77736753", "0.77449965", "0.77242976", "0.7719314", "0.76511186", "0.76373756", "0.76299536", "0.7607513", "0.75435734", "0.75402004", "0.753...
0.7916416
5
Proxy for subprocess.call with logging.
def call(cmd, *args, **kwargs): import subprocess logger().info('call `%s`', ' '.join(cmd)) return subprocess.call(cmd, *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _subprocess_call(*args, **kwargs):\n return subprocess.call(*args, **kwargs)", "def call_subprocess_with_logging(command, my_env=None):\n logger.info(\"About to call : {0}\".format(command))\n return_code = 0\n try:\n if my_env:\n return_out = subprocess.check_output(command, st...
[ "0.70570767", "0.64355564", "0.6330634", "0.63057506", "0.62618166", "0.6218107", "0.61542827", "0.6139648", "0.60723656", "0.6049053", "0.6030424", "0.60250443", "0.6003652", "0.597314", "0.59305435", "0.5910463", "0.5904408", "0.58928114", "0.5857994", "0.58190536", "0.5814...
0.6278796
4
Sets up a rate law following the HenriMichaelisMenten law. v = k_cat protein substrate / (k_m + substrate)
def MichaelisMentenKCat( substrate: str, protein: str, enzmldoc, k_cat: Dict[str, Any] = {"ontology": SBOTerm.K_CAT}, k_m: Dict[str, Any] = {"ontology": SBOTerm.K_M}, ): # Check if the given IDs are part of the EnzymeML document already if substrate not in enzmldoc.getSpeciesIDs(): raise SpeciesNotFoundError( species_id=substrate, enzymeml_part="Reactants/Proteins" ) if protein not in enzmldoc.getSpeciesIDs(): raise SpeciesNotFoundError( species_id=protein, enzymeml_part="Reactants/Proteins" ) # Check if ontologies are added, if not add them if k_m.get("ontology") is None: k_m["ontology"] = SBOTerm.K_M if k_cat.get("ontology") is None: k_cat["ontology"] = SBOTerm.K_CAT # Create the model using a factory model = ModelFactory( name="Michaelis-Menten Rate Law", equation="k_cat * protein * substrate / (k_m + substrate)", k_cat=k_cat, k_m=k_m, ontology=SBOTerm.MICHAELIS_MENTEN, ) return model(protein=protein, substrate=substrate)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def birch_murnaghan(p, v):\n return p[0]+9.0/16*p[3]*p[1]*( ( (p[3]/v)**(2.0/3)-1 )**3*p[2]+\n ( (p[3]/v)**(2.0/3)-1 )**2*\n ( 6-4*(p[3]/v)**(2.0/3) ) )", "def murnaghan(p, v):\n return p[0]+p[1]*v/p[2]*((p[3]/v)**p[2]/(p[2]-1)+1)-p[1]*p[3...
[ "0.5920097", "0.579926", "0.56057304", "0.55859894", "0.55132955", "0.5480198", "0.54527843", "0.54427314", "0.54189944", "0.538485", "0.53595084", "0.53503376", "0.5327499", "0.5325421", "0.52962947", "0.5276504", "0.52582276", "0.5244729", "0.5234504", "0.5230446", "0.52264...
0.5424896
8
Sets up a rate law following the HenriMichaelisMenten law. v = vmax substrate / (k_m + substrate)
def MichaelisMentenVMax( substrate: str, enzmldoc, vmax: Dict[str, Any] = {"ontology": SBOTerm.V_MAX}, k_m: Dict[str, Any] = {"ontology": SBOTerm.K_M}, ): # Check if the given IDs are part of the EnzymeML document already if substrate not in enzmldoc.getSpeciesIDs(): raise SpeciesNotFoundError( species_id=substrate, enzymeml_part="Reactants/Proteins" ) # Check if ontologies are added, if not add them if k_m.get("ontology") is None: k_m["ontology"] = SBOTerm.K_M if vmax.get("ontology") is None: vmax["ontology"] = SBOTerm.V_MAX # Create the model using a factory model = ModelFactory( name="Michaelis-Menten Rate Law", equation="vmax * substrate / (k_m + substrate)", vmax=vmax, k_m=k_m, ontology=SBOTerm.MICHAELIS_MENTEN, ) return model(substrate=substrate)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def birch_murnaghan(p, v):\n return p[0]+9.0/16*p[3]*p[1]*( ( (p[3]/v)**(2.0/3)-1 )**3*p[2]+\n ( (p[3]/v)**(2.0/3)-1 )**2*\n ( 6-4*(p[3]/v)**(2.0/3) ) )", "def murnaghan(p, v):\n return p[0]+p[1]*v/p[2]*((p[3]/v)**p[2]/(p[2]-1)+1)-p[1]*p[3...
[ "0.64286643", "0.6351248", "0.5775196", "0.5764248", "0.569532", "0.5618492", "0.55840904", "0.55695754", "0.55393964", "0.5536443", "0.5521898", "0.55039316", "0.5488269", "0.54849935", "0.5462513", "0.54570895", "0.5439389", "0.5419699", "0.54196864", "0.54181284", "0.54148...
0.55910826
6
Normalize the features in the data set.
def normalize_features(array): array_normalized = (array-array.mean())/array.std() mu = array.mean() sigma = array.std() return array_normalized, mu, sigma
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalizeFeatureVector(self):\n # Normalize features\n total = 0.0\n for v in self.features.values(): total += abs(v)\n if total == 0.0: \n total = 1.0\n for k,v in self.features.iteritems():\n self.features[k] = float(v) / total", "def _normalize(self...
[ "0.79551655", "0.79528546", "0.7853807", "0.77867204", "0.7698743", "0.76595855", "0.76581943", "0.7635334", "0.76207286", "0.7542891", "0.75298584", "0.74793464", "0.74740255", "0.7413795", "0.7320142", "0.7320142", "0.7318681", "0.73162293", "0.7313582", "0.72916085", "0.72...
0.7046461
27
Compute the cost function given a set of features / values, and the values for our thetas.
def compute_cost(features, values, theta): npoints = len(values) sum_of_square_errors = np.square(np.dot(features, theta) - values).sum() cost = sum_of_square_errors / (2*npoints) return cost
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_cost(features, values, theta):\r\n \r\n # your code here\r\n error = (values - features.dot(theta))\r\n cost = error.dot(error) \r\n return cost", "def compute_cost(features, values, theta):\r\n m = len(values)\r\n sum_of_square_errors = numpy.square(numpy.dot(features, thet...
[ "0.74634284", "0.72324276", "0.7181755", "0.69701", "0.69247854", "0.6471834", "0.6227543", "0.6182345", "0.617613", "0.6167203", "0.6144014", "0.6132684", "0.61300564", "0.6123157", "0.6107986", "0.6094501", "0.60778195", "0.6051131", "0.60425967", "0.60174876", "0.60173744"...
0.71723294
3
Perform gradient descent given a data set with an arbitrary number of features.
def gradient_descent(features, values, theta, alpha, num_iterations): # number of points npoints = len(values) # intialize cost history cost_history = [] # num_interations iterations for iiter in range(num_iterations): # compute and store cost cost = compute_cost(features, values, theta) cost_history.append(cost) # update values of theta values_predicted = np.dot(features, theta) theta = theta + (alpha/npoints)*(np.dot(values - values_predicted,features)) return theta, pandas.Series(cost_history)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gradient_descent(features, labels, alpha, num_iters):\n # Initial settings of weights\n weights = [0, 0, 0]\n\n # Length of dataset\n N = len(features[0])\n\n # Take 100 gradient steps\n gradient_losses = [0, 0, 0]\n\n # Take num_iters steps of gradient descent\n for step in range(num_i...
[ "0.75024587", "0.7380757", "0.7227321", "0.7210996", "0.7183001", "0.712503", "0.6942265", "0.6937524", "0.68507755", "0.68019944", "0.6660215", "0.66235965", "0.65996253", "0.65692246", "0.6564588", "0.65573347", "0.6539201", "0.65332645", "0.6500159", "0.6479878", "0.643247...
0.7039808
6
This function is for viewing the plot of your cost history.
def plot_cost_history(alpha, cost_history): cost_df = pandas.DataFrame({ 'Cost_History': cost_history, 'Iteration': range(len(cost_history)) }) return ggplot(cost_df, aes('Iteration', 'Cost_History')) + geom_point() + ggtitle('Cost History for alpha = %.3f' % alpha )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_costs(j_history):\n plt.figure(figsize=(14, 8))\n plt.plot(range(len(j_history)), j_history)\n plt.grid(True)\n plt.title('J (Cost)')\n plt.xlabel('Iteration')\n plt.ylabel('Cost function')\n plt.xlim([0, 1.05 * ITERATIONS])\n plt.ylim([4, 7])\n plt.show()\n plt.close()", "...
[ "0.745946", "0.7305429", "0.6926057", "0.68891627", "0.6870632", "0.65292275", "0.65152884", "0.65122676", "0.650241", "0.6492996", "0.646902", "0.6411578", "0.64004505", "0.63721967", "0.63554156", "0.6354006", "0.63402677", "0.6333534", "0.63082844", "0.63080597", "0.629246...
0.7419536
1
Delete the index from Elastic Search
def trigger_delete(cls, instance): es_client.delete(instance.blog.index_name(), 'blog_post_index', instance.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_index(self):\n es = self.get_es()\n if es.head(self.es_index):\n es.delete(self.es_index)", "def _delete_index( env, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index....
[ "0.84657955", "0.8269354", "0.81989384", "0.7981969", "0.78138375", "0.78138286", "0.77492446", "0.7635957", "0.7554919", "0.7533688", "0.75277823", "0.74774957", "0.73601156", "0.7322131", "0.7238156", "0.71974444", "0.70426786", "0.7019291", "0.6958277", "0.69546735", "0.69...
0.64396274
33
Delete the index from Elastic Search
def trigger_delete(cls, instance): es_client.delete(instance.blog.index_name(), 'blog_page_index', instance.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_index(self):\n es = self.get_es()\n if es.head(self.es_index):\n es.delete(self.es_index)", "def _delete_index( env, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index....
[ "0.84657955", "0.8269354", "0.81989384", "0.7981969", "0.78138375", "0.78138286", "0.77492446", "0.7635957", "0.7554919", "0.7533688", "0.75277823", "0.74774957", "0.73601156", "0.7322131", "0.7238156", "0.71974444", "0.70426786", "0.7019291", "0.6958277", "0.69546735", "0.69...
0.6270193
44
Searches inside all the indices
def search(text): s = Search() result = _search(s, text) _print_results(result) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(query, idx):\n\n if len(query) == 0:\n return []\n ordered = {}\n for e in query:\n ordered[e] = len(idx[e])\n ordered = sorted(ordered.items(), key = lambda d: d[1])\n results = idx[ordered[0][0]]\n i = 1\n while i < len(ordered):\n results = intersect(results,...
[ "0.67821324", "0.65260226", "0.6520989", "0.6491237", "0.6243144", "0.61972666", "0.6069781", "0.60370374", "0.60119206", "0.60016865", "0.5995763", "0.5986045", "0.5966168", "0.5955528", "0.592799", "0.589361", "0.5850095", "0.58489054", "0.58428663", "0.5826654", "0.5807836...
0.0
-1
Searches inside the index for umbra3d
def search_umbra(text): result = _search_blog('umbra3d', text) _print_results(result) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clustering_dbscan_o3d():\n pass", "def cloud_index():\n import alltheitems.cloud\n return alltheitems.cloud.index()", "def test_figure3(self):\n\n ssearcher = SimpleSearcher.from_prebuilt_index('msmarco-passage')\n encoder = TctColBertQueryEncoder('castorini/tct_colbert-msmarco')\n ...
[ "0.5886022", "0.55750906", "0.5477457", "0.51528597", "0.5148673", "0.5101667", "0.50694734", "0.4972866", "0.4953324", "0.49039114", "0.48821348", "0.48311734", "0.4818827", "0.4813475", "0.47986737", "0.47905108", "0.4765872", "0.47636506", "0.47565117", "0.47055826", "0.46...
0.6636374
0
Searches inside the index for postman
def search_postman(text): result = _search_blog('postman', text) _print_results(result) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(es_object, index_name, search):\n res = es_object.search(index=index_name, body=search)\n pprint(res)", "def search_v1(query_tokens, inverted_index):\n return []", "def fetch_all(): \n client, index_name = connection_es()\n res = client.search(index = index_name+\"*\")\n return ...
[ "0.67847615", "0.6660635", "0.66422635", "0.66095644", "0.65551525", "0.6506054", "0.64736253", "0.6453532", "0.6433664", "0.63998264", "0.63600475", "0.63524884", "0.6313629", "0.62886024", "0.62773657", "0.6242144", "0.6226204", "0.6226116", "0.62253296", "0.62253296", "0.6...
0.5915123
60
Parse a date string from VocaDB API
def _parse_date(date_str: str) -> datetime: datetime_obj = datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%SZ") return f"<t:{int(datetime_obj.timestamp())}:d>"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_date(str_date):\n return ciso8601.parse_datetime(str_date)", "def parse_date(date):\n # MediaWiki API dates are always of the format\n # YYYY-MM-DDTHH:MM:SSZ\n # (see $formats in wfTimestamp() in includes/GlobalFunctions.php)\n return datetime.strptime(date, '%Y-%m-%dT%...
[ "0.680809", "0.67152417", "0.65661365", "0.6514984", "0.6471285", "0.6438476", "0.6424507", "0.6418622", "0.6410768", "0.6408324", "0.63828737", "0.6381386", "0.6373164", "0.6363841", "0.63582087", "0.635436", "0.634703", "0.6330038", "0.62913615", "0.6277557", "0.6273731", ...
0.66212666
2
Fetch data from VocaDB API and prompt the user to select an entry
async def _fetch_data(self, ctx: commands.Context, query: str): params = { "query": query, "maxResults": 10, "sort": "FavoritedTimes", "preferAccurateMatches": "true", "nameMatchMode": "Words", "fields": "Artists,Lyrics,Names,ThumbUrl", } headers = { "User-Agent": f"Red-DiscordBot/{red_version} Fixator10-cogs/VocaDB/{self.__version__}" } try: async with self.session.get(BASE_API_URL, params=params, headers=headers) as resp: if resp.status != 200: return f"https://http.cat/{resp.status}" result = await resp.json() except asyncio.TimeoutError: return "Request timed out" all_items = result.get("items") if not all_items: return None filtered_items = [x for x in all_items if x.get("lyrics")] if not filtered_items: return None if len(filtered_items) == 1: return filtered_items[0] items = "\n".join( f"**`[{i}]`** {x.get('defaultName')} - {x.get('artistString')}" f" (published: {self._parse_date(x.get('publishDate'))})" for i, x in enumerate(filtered_items, start=1) ) prompt = await ctx.send( f"Found below **{len(filtered_items)}** result(s). Pick one in 60 seconds:\n\n{items}" ) def check(msg: discord.Message) -> bool: return bool( msg.content.isdigit() and int(msg.content) in range(len(filtered_items) + 1) and msg.author.id == ctx.author.id and msg.channel.id == ctx.channel.id ) try: choice = await self.bot.wait_for("message", timeout=60.0, check=check) except asyncio.TimeoutError: choice = None if choice is None or choice.content.strip() == "0": with contextlib.suppress(discord.NotFound, discord.HTTPException): await prompt.edit(content="Cancelled.", delete_after=5.0) return None choice = int(choice.content.strip()) - 1 with contextlib.suppress(discord.NotFound, discord.HTTPException): await prompt.delete() return filtered_items[choice]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get():\n id_num = int(input('Enter the ID number of the item you wish to retrieve\\n'))\n db_actions.retrieve(id_num)", "def get_user_input():\n st.sidebar.header('Parámetros de entrada') \n acti2 = st.sidebar.selectbox('Código de Actividad Económica', ['ACABADO DE PRODUCTOS TEXTILES',\n ...
[ "0.590885", "0.56969845", "0.56191784", "0.5579956", "0.55592823", "0.5529667", "0.5518773", "0.55171174", "0.551635", "0.5444369", "0.53913987", "0.53488976", "0.5306791", "0.52756387", "0.5254248", "0.5252376", "0.5200848", "0.5175717", "0.51679784", "0.5167819", "0.5120613...
0.48027787
72
Create an embed with the song info
def _info_embed(self, colour, data: Dict[str, Any]) -> discord.Embed: minutes = data.get("lengthSeconds", 0) // 60 seconds = data.get("lengthSeconds", 0) % 60 pub_date = self._parse_date(data.get("publishDate")) all_artists = ", ".join( f"[{x.get('name')}](https://vocadb.net/Ar/{x.get('id')}) ({x.get('categories')})" for x in data.get("artists") ) embed = discord.Embed(colour=colour) embed.title = f"{data.get('defaultName')} - {data.get('artistString')}" embed.url = f"https://vocadb.net/S/{data.get('id')}" embed.set_thumbnail(url=data.get("thumbUrl", "")) embed.add_field(name="Duration", value=f"{minutes} minutes, {seconds} seconds") favorites, score = (data.get("favoritedTimes", 0), data.get("ratingScore", 0)) embed.add_field(name="Published On", value=pub_date) embed.add_field(name="Statistics", value=f"{favorites} favourite(s), {score} total score") embed.add_field(name="Artist(s)", value=all_artists) embed.set_footer(text="Powered by VocaDB") return embed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begin:\", value=event....
[ "0.7574429", "0.6955898", "0.6697787", "0.6493446", "0.63093257", "0.6292664", "0.6217672", "0.6171954", "0.6141338", "0.6098868", "0.6069204", "0.5961678", "0.5946134", "0.5936363", "0.59200823", "0.5897257", "0.58952886", "0.58319783", "0.5789473", "0.5774787", "0.5751909",...
0.6323162
4
Create an embed with the lyrics
def _lyrics_embed(colour, page: Dict[str, Any], data: Dict[str, Any]) -> discord.Embed: title = [ x.get("value") for x in data.get("names") if x.get("language") == LANGUAGE_MAP.get(page["cultureCode"]) ] em = discord.Embed( title=title[0] if title else data.get("defaultName"), colour=colour, ) em.set_thumbnail(url=data.get("thumbUrl") or "") if data.get("id"): em.url = f"https://vocadb.net/S/{data['id']}" em.description = page["value"][:4090] if page.get("value") else "No lyrics found." if page.get("url"): em.add_field( name="Source", value=f"[{page.get('source') or 'Source'}]({page['url']})", ) return em
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def embed():", "async def _create_embed(self, event, info):\n\n e = discord.Embed(url=info.get(\"url\"))\n e.title = \"%s %s!\" % (info.get(\"streamer\"), info.get(\"live_status\"))\n e.add_field(name=\"Stream title\", value=info.get(\"title\"), inline=False)\n e.add_field(name=\"Begi...
[ "0.70255935", "0.63106394", "0.62139446", "0.61759347", "0.60498273", "0.60261035", "0.5952487", "0.59314024", "0.5826032", "0.5814283", "0.57923996", "0.5775373", "0.57529444", "0.5735012", "0.56990874", "0.56855226", "0.56756103", "0.5667003", "0.56360406", "0.5626547", "0....
0.7571321
0
Fetch Vocaloid song lyrics from VocaDB.net database
async def vocadb(self, ctx: commands.Context, *, query: str): await ctx.trigger_typing() data = await self._fetch_data(ctx, query) if type(data) == str: return await ctx.send(data) if not data: return await ctx.send("No results found.") await ctx.send(embed=self._info_embed(await ctx.embed_colour(), data)) # Added a small delay to improve UX for initial embed await asyncio.sleep(2.0) embeds = [] for i, page in enumerate(data["lyrics"], start=1): language = f"Language: {LANGUAGE_MAP.get(page.get('cultureCode', 'na'))}" emb = self._lyrics_embed(await ctx.embed_colour(), page, data) emb.set_footer(text=f"{language} • Page {i} of {len(data['lyrics'])}") embeds.append(emb) controls = {"\N{CROSS MARK}": close_menu} if len(embeds) == 1 else DEFAULT_CONTROLS await menu(ctx, embeds, controls=controls, timeout=90.0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lyrics(self):\n return get_lyrics(self.artist, self.title,'')", "def get_lyrics(self, artist, song):\n\n # Disable lyrics display\n self.status_bar.hide()\n self.lyrics_view.hide()\n self.scroll.hide()\n\n lyrics = None\n in_database = False\n\n if self...
[ "0.63000757", "0.6130476", "0.60185164", "0.5984323", "0.587224", "0.58188677", "0.5779001", "0.5714484", "0.57014984", "0.5568323", "0.5517398", "0.55104864", "0.5498643", "0.54863167", "0.5469233", "0.54516006", "0.543093", "0.54292554", "0.5408678", "0.5388531", "0.5296494...
0.56197834
9
Fuse conv and bn into one module.
def _fuse_conv_bn(conv, bn): conv_w = conv.weight conv_b = conv.bias if conv.bias is not None else torch.zeros_like( bn.running_mean) factor = bn.weight / torch.sqrt(bn.running_var + bn.eps) conv.weight = nn.Parameter(conv_w * factor.reshape([conv.out_channels, 1, 1, 1])) conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias) return conv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fuse_conv_bn(module):\n last_conv = None\n last_conv_name = None\n\n for name, child in module.named_children():\n if isinstance(child,\n (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):\n if last_conv is None: # only fuse BN that is after Conv\n ...
[ "0.66424954", "0.6055714", "0.602726", "0.59107614", "0.5825306", "0.57937133", "0.57798207", "0.5775232", "0.5716006", "0.56922126", "0.5688817", "0.5679025", "0.566316", "0.56493765", "0.56161374", "0.5604259", "0.5571019", "0.5565932", "0.5558572", "0.5545696", "0.5540742"...
0.651783
1
Recursively fuse conv and bn in a module. During inference, the functionary of batch norm layers is turned off but only the mean and var alone channels are used, which exposes the chance to fuse it with the preceding conv layers to save computations and simplify network structures.
def fuse_conv_bn(module): last_conv = None last_conv_name = None for name, child in module.named_children(): if isinstance(child, (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)): if last_conv is None: # only fuse BN that is after Conv continue fused_conv = _fuse_conv_bn(last_conv, child) module._modules[last_conv_name] = fused_conv # To reduce changes, set BN as Identity instead of deleting it. module._modules[name] = nn.Identity() last_conv = None elif isinstance(child, nn.Conv2d): last_conv = child last_conv_name = name else: fuse_conv_bn(child) return module
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fuse_conv_bn(conv, bn):\n conv_w = conv.weight\n conv_b = conv.bias if conv.bias is not None else torch.zeros_like(\n bn.running_mean)\n\n factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)\n conv.weight = nn.Parameter(conv_w *\n factor.reshape([conv.out_...
[ "0.695171", "0.6805263", "0.6269793", "0.62578046", "0.6063939", "0.6063939", "0.60563296", "0.6027096", "0.60044354", "0.5954461", "0.5929739", "0.5882636", "0.58481455", "0.5835012", "0.5830163", "0.57766056", "0.57479167", "0.5734447", "0.57262725", "0.5723586", "0.5721918...
0.7612941
0
When updating from a dictionary, this is processed for any key that does not match a ``ConfigurationProperty``.
def update_default_from_dict(self, key, value): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, config_dict):\r\n self._update(config_dict, allow_new_keys=True)", "def _update(self, config_dict, allow_new_keys=True):\n if not config_dict:\n return\n\n for k, v in six.iteritems(config_dict):\n if k not in self.__dict__.keys():\n if allow...
[ "0.63395983", "0.6195475", "0.61581475", "0.61513746", "0.6097584", "0.5982047", "0.5815127", "0.580773", "0.578081", "0.5776933", "0.5750993", "0.5738719", "0.5706137", "0.5621806", "0.56125647", "0.5599297", "0.5510102", "0.5506825", "0.5498179", "0.5492065", "0.5476959", ...
0.58367
6
When merging from a dictionary, this is processed for any key that does not match a ``ConfigurationProperty``.
def merge_default_from_dict(self, key, value, lists_only=False): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge(target_config, other_config):\n for key, value in other_config.items():\n if key not in target_config or not isinstance(value, dict):\n target_config[key] = value\n else:\n merge(target_config[key], other_config[key])", "def test_merge_overwrite_missing_source_key...
[ "0.59852463", "0.58540046", "0.57954663", "0.5752213", "0.57376033", "0.5702366", "0.5665245", "0.5632572", "0.55908483", "0.55750465", "0.5550517", "0.5546977", "0.5520356", "0.55015045", "0.5499027", "0.54877573", "0.5457098", "0.54544103", "0.5435976", "0.54214597", "0.536...
0.5641025
7
Updates this configuration object from a dictionary.
def update_from_dict(self, dct): if not dct: return all_props = self.__class__.CONFIG_PROPERTIES for key, value in six.iteritems(dct): attr_config = all_props.get(key) if attr_config: setattr(self, key, value) else: self.update_default_from_dict(key, value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, config_dict):\r\n self._update(config_dict, allow_new_keys=True)", "def update(self, config_dict):\n self._update(config_dict, allow_new_keys=True)", "def update_from_dict(self, dictionary):\n for key in dictionary:\n setattr(self, key, dictionary[key])\n ret...
[ "0.7539606", "0.7526402", "0.73409635", "0.6924898", "0.6826344", "0.68139464", "0.67985266", "0.6760424", "0.6756888", "0.66938514", "0.65406144", "0.6525585", "0.6479391", "0.64561343", "0.6436355", "0.6408912", "0.6408051", "0.63989556", "0.63800776", "0.63728046", "0.6356...
0.7746895
0
Updates this configuration object from another.
def update_from_obj(self, obj, copy=False): obj.clean() obj_config = obj._config all_props = self.__class__.CONFIG_PROPERTIES if copy: for key, value in six.iteritems(obj_config): attr_config = all_props.get(key) if attr_config: attr_type = attr_config.attr_type if attr_type: if issubclass(attr_type, list): self._config[key] = value[:] elif attr_type is dict: self._config[key] = value.copy() else: self._config[key] = value self._modified.discard(key) else: filtered_dict = {key: value for key, value in six.iteritems(obj_config) if key in all_props} self._config.update(filtered_dict) self._modified.difference_update(filtered_dict.keys())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, other: Mapping[str, Any]) -> None:\n self._config.update(self._flatten_dict(other))", "def update(self, other):\n _merge_dicts(self, other)", "def merge(self, other_config):\n # Make a copy of the current attributes in the config object.\n config_options = copy.copy...
[ "0.79346645", "0.697184", "0.6949921", "0.6621027", "0.6513877", "0.6501808", "0.6463023", "0.64405686", "0.6436044", "0.62805444", "0.62770504", "0.62714624", "0.6265465", "0.6204351", "0.61828285", "0.61200017", "0.61178195", "0.6052243", "0.60450506", "0.60440445", "0.6012...
0.6128411
15
Merges a dictionary into this configuration object.
def merge_from_dict(self, dct, lists_only=False): if not dct: return self.clean() all_props = self.__class__.CONFIG_PROPERTIES for key, value in six.iteritems(dct): attr_config = all_props.get(key) if attr_config: attr_type, default, input_func, merge_func = attr_config[:4] if (merge_func is not False and value != default and (not lists_only or (attr_type and issubclass(attr_type, list)))): if input_func: value = input_func(value) self._merge_value(attr_type, merge_func, key, value) else: self.merge_default_from_dict(key, value, lists_only=lists_only)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_config(self_config, indict):\n\n self_config.merge(indict)\n patch_config(self_config, indict)", "def update_config(self, update_dict):\n self.config = recursive_merge_dicts(self.config, update_dict)", "def merge(target_config, other_config):\n for key, value in other_config.items():\...
[ "0.72250795", "0.70428866", "0.6939675", "0.6870766", "0.67551553", "0.662299", "0.6565024", "0.647432", "0.6393523", "0.6344991", "0.6342676", "0.626442", "0.6260575", "0.6257758", "0.62464523", "0.6230818", "0.6222192", "0.6182466", "0.6168279", "0.61122817", "0.6098302", ...
0.6414965
8
Merges a configuration object into this one.
def merge_from_obj(self, obj, lists_only=False): self.clean() obj.clean() obj_config = obj._config all_props = self.__class__.CONFIG_PROPERTIES for key, value in six.iteritems(obj_config): attr_config = all_props[key] attr_type, default, __, merge_func = attr_config[:4] if (merge_func is not False and value != default and (not lists_only or (attr_type and issubclass(attr_type, list)))): self._merge_value(attr_type, merge_func, key, value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge(self, other_config):\n # Make a copy of the current attributes in the config object.\n config_options = copy.copy(self._user_provided_options)\n\n # Merge in the user provided options from the other config\n config_options.update(other_config._user_provided_options)\n\n ...
[ "0.76882917", "0.67406607", "0.6626241", "0.65219", "0.6375062", "0.6338064", "0.6310122", "0.63097996", "0.6292221", "0.62915736", "0.622043", "0.61334", "0.6007048", "0.5979473", "0.5954268", "0.5950543", "0.5942655", "0.5942226", "0.5904852", "0.5901438", "0.58915347", "...
0.5562985
45
Updates the configuration with the contents of the given configuration object or dictionary. In case of a dictionary, only valid attributes for this class are considered. Existing attributes are replaced with the new values. The object is not cleaned before or after, i.e. may accept invalid input. In case of an update by object, that object is cleaned before the update, so that updated values should be validated. However, alreadystored values are not cleaned before or after.
def update(self, values, copy_instance=False): if isinstance(values, self.__class__): self.update_from_obj(values, copy=copy_instance) elif isinstance(values, dict): self.update_from_dict(values) else: raise ValueError("{0} or dictionary expected; found '{1}'.".format(self.__class__.__name__, type(values).__name__))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update(self, config_dict, allow_new_keys=True):\r\n if not config_dict:\r\n return\r\n\r\n for k, v in six.iteritems(config_dict):\r\n if k not in self.__dict__:\r\n if allow_new_keys:\r\n self.__setattr__(k, v)\r\n else:\r\n raise KeyError('Key `{}` does not ex...
[ "0.7369526", "0.72188014", "0.71054125", "0.6892814", "0.68874276", "0.68512803", "0.641856", "0.63729274", "0.6282296", "0.61824185", "0.6173597", "0.61599773", "0.61506504", "0.6055091", "0.6051019", "0.6051019", "0.6043906", "0.60201603", "0.59867066", "0.59764946", "0.596...
0.55972207
51
Merges listbased attributes into one list including unique elements from both lists. When ``lists_only`` is set to ``False``, updates dictionaries and overwrites singlevalue attributes. The resulting configuration is 'clean', i.e. input values converted and validated. If the conversion is not possible, a ``ValueError`` is raised.
def merge(self, values, lists_only=False): if isinstance(values, self.__class__): self.merge_from_obj(values, lists_only=lists_only) elif isinstance(values, dict): self.merge_from_dict(values, lists_only=lists_only) else: raise ValueError("{0} or dictionary expected; found '{1}'.".format(self.__class__.__name__, type(values).__name__))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_from_obj(self, obj, lists_only=False):\n self.clean()\n obj.clean()\n obj_config = obj._config\n all_props = self.__class__.CONFIG_PROPERTIES\n for key, value in six.iteritems(obj_config):\n attr_config = all_props[key]\n attr_type, default, __, me...
[ "0.65139383", "0.5578662", "0.5572278", "0.55066985", "0.5469693", "0.53935814", "0.5371809", "0.53107864", "0.5277536", "0.5260815", "0.5229918", "0.52003044", "0.5156324", "0.5125335", "0.5069357", "0.50458443", "0.5019574", "0.4998074", "0.49735352", "0.49711102", "0.49676...
0.6129385
1
Creates a copy of the current instance.
def copy(self): return self.__class__(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result", "def copy(self):\n return self.__class__(dict(self))", "def copy(self):\n\t\ttemp = self.__class__()\n\t\ttemp.copy_from(self)\n\t\treturn temp", "de...
[ "0.8445658", "0.83826053", "0.8367299", "0.83550745", "0.8290682", "0.82799333", "0.8262882", "0.823516", "0.8217232", "0.8166611", "0.8156989", "0.81500196", "0.8146055", "0.8143642", "0.81333435", "0.81235075", "0.8090402", "0.8090402", "0.80738163", "0.8067191", "0.8032540...
0.86091197
1
Cleans the input values of this configuration object. Fields that have gotten updated through properties are converted to configuration values that match the format needed by functions using them. For example, for listlike values it means that input of single strings is transformed into a singleentry list. If this conversion fails, a ``ValueError`` is raised.
def clean(self): all_props = self.__class__.CONFIG_PROPERTIES for prop_name in self._modified: attr_config = all_props.get(prop_name) if attr_config and attr_config.input_func: self._config[prop_name] = attr_config.input_func(self._config[prop_name]) self._modified.clear()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(self, value):\n value = self.validate_to_python(value)\n self.run_validators(value)\n return value", "def clean(self, value):\n value = self.validate_to_python(value)\n self.run_validators(value)\n return value", "def clean(cls, value):\n return", "d...
[ "0.60418636", "0.60418636", "0.59051543", "0.5863725", "0.5829741", "0.5760881", "0.5721319", "0.5714525", "0.5582324", "0.5558668", "0.5509648", "0.5507196", "0.55028373", "0.54199827", "0.54151857", "0.5408943", "0.5379859", "0.53760797", "0.5363679", "0.53535485", "0.53409...
0.6916164
0
Whether the current object is 'clean', i.e. has no nonconverted input.
def is_clean(self): return not self._modified
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(self) -> bool:\n raise NotImplementedError()", "def __bool__(self):\n return self.isValid()", "def __bool__(self):\n return self.is_valid", "def is_valid(self):\n self.clean()\n return not bool(self.errors)", "def is_raw(self):\n return not self._isReduce...
[ "0.6872073", "0.63761157", "0.63610554", "0.6312004", "0.63031477", "0.63022435", "0.6240924", "0.61812806", "0.61137867", "0.60905904", "0.60698545", "0.6046526", "0.60410285", "0.603819", "0.6018456", "0.60071707", "0.60055494", "0.5978157", "0.5974375", "0.59508747", "0.59...
0.73114955
0
Returns a copy of the configuration dictionary. Changes in this should not reflect on the original object.
def as_dict(self): self.clean() d = OrderedDict() all_props = self.__class__.CONFIG_PROPERTIES for attr_name, attr_config in six.iteritems(all_props): value = self._config[attr_name] attr_type = attr_config.attr_type if attr_type: if value: if issubclass(attr_type, list): if issubclass(attr_type, NamedTupleList): d[attr_name] = [i._asdict() for i in value] else: d[attr_name] = value[:] elif attr_type is dict: d[attr_name] = dict(value) elif value is not NotSet: d[attr_name] = value return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __copy__(self):\n d = dict()\n d.update(self.items())\n return d", "def copy(self):\n return self.from_dict(self.to_dict(True))", "def get(self) -> dict:\n return clone(self.__dict__)", "def clone(self) -> 'ContainerConfig':\n return deepcopy(self)", "def copy(...
[ "0.74385566", "0.7369222", "0.73497945", "0.7333246", "0.7279644", "0.7213318", "0.72081774", "0.7177167", "0.71266174", "0.70725924", "0.70725346", "0.7034118", "0.69861895", "0.69729465", "0.69077206", "0.69053024", "0.68810904", "0.68651885", "0.6863838", "0.6860341", "0.6...
0.0
-1
Prepares a new picking for this stock request as it could not be assigned to another picking. This method is designed to be inherited.
def _prepare_picking_values(self): return { 'origin': self.doc_num, 'company_id': self.company_id.id, 'move_type': 'direct', 'partner_id': self.partner_id.id, 'picking_type_id': self.picking_type_id.id, 'location_id': self.location_id.id, 'location_dest_id': self.location_dest_id.id, 'picking_type_code': self.request_type_code }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_chained_picking(self, cr, uid, pick_name, picking, purchase_type, move, context=None):\n res = super(stock_move, self)._create_chained_picking(cr, uid, pick_name, picking, purchase_type, move, context=context)\n if picking.purchase_id:\n self.pool.get('stock.picking').write(cr,...
[ "0.6094836", "0.5825267", "0.5737362", "0.5558884", "0.55338615", "0.5362217", "0.53577745", "0.53384507", "0.5322603", "0.531471", "0.5305547", "0.5271691", "0.52530354", "0.5203157", "0.517102", "0.51381963", "0.50887847", "0.5068953", "0.5053527", "0.50466454", "0.5034907"...
0.54961354
5
Get the vocab file and casing info from the Hub module.
def create_tokenizer_from_hub_module(bert_path, sess): bert_module = hub.Module(bert_path) tokenization_info = bert_module(signature="tokenization_info", as_dict=True) vocab_file, do_lower_case = tf.print( [tokenization_info["vocab_file"], tokenization_info["do_lower_case"]] ) return FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vocab():\n if data_dir is not None and vocab_filename is not None:\n vocab_filepath = os.path.join(data_dir, vocab_filename)\n if tf.gfile.Exists(vocab_filepath):\n tf.logging.info(\"Found vocab file: %s\", vocab_filepath)\n vocab_symbolizer = text_encoder.SubwordTextEncoder(voca...
[ "0.6443318", "0.63813704", "0.6220464", "0.61732", "0.5963318", "0.5917839", "0.59003973", "0.5888739", "0.58854616", "0.5765365", "0.56868184", "0.5684578", "0.5683986", "0.56751275", "0.55819345", "0.5499176", "0.5480448", "0.5460594", "0.5460213", "0.54520303", "0.544654",...
0.506715
58
Converts a single `InputExample` into a single `InputFeatures`.
def convert_single_example(tokenizer, example, max_seq_length=256): if isinstance(example, PaddingInputExample): input_ids = [0] * max_seq_length input_mask = [0] * max_seq_length segment_ids = [0] * max_seq_length label = 0 return input_ids, input_mask, segment_ids, label tokens_a = tokenizer.tokenize(example.text_a) if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0: (max_seq_length - 2)] tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) # print('Tokens', tokens[:3]) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length return input_ids, input_mask, segment_ids, example.label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_single_example(ex_index, example, label_list, max_seq_length,\n tokenizer):\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n input_ids, segment_ids, input_mask = \\\n tokenizer.encode_text(text_a=example.text_a,\n ...
[ "0.71971315", "0.70364904", "0.7011036", "0.689469", "0.68478036", "0.65458274", "0.6541168", "0.64461863", "0.6425648", "0.63981766", "0.6375564", "0.6332188", "0.6307511", "0.6293627", "0.6263605", "0.6238047", "0.6184619", "0.6152681", "0.61517704", "0.61352295", "0.613507...
0.0
-1
Convert a set of `InputExample`s to a list of `InputFeatures`.
def convert_examples_to_features(tokenizer, examples, max_seq_length=256): input_ids, input_masks, segment_ids, labels = [], [], [], [] # for example in tqdm(examples, desc="Converting examples to features"): for example in examples: input_id, input_mask, segment_id, label = convert_single_example( tokenizer, example, max_seq_length ) input_ids.append(input_id) input_masks.append(input_mask) segment_ids.append(segment_id) labels.append(label) assert len(examples) == len(labels) return ( np.array(input_ids), np.array(input_masks), np.array(segment_ids), np.array(labels).reshape(-1, 1), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_examples_to_features(examples, tokenizer, is_training, args):\n all_features = []\n positive_features = 0\n negative_features = 0\n logger.info(\"Converting a list of NqExamples into InputFeatures ...\")\n for index, example in enumerate(tqdm(examples)):\n example_index = example[...
[ "0.73133975", "0.7042608", "0.69537765", "0.6883128", "0.68623626", "0.685001", "0.68385124", "0.6820685", "0.6765126", "0.67010623", "0.66909826", "0.66590303", "0.6587141", "0.6575663", "0.655151", "0.64223295", "0.64164066", "0.6334047", "0.6323665", "0.6314246", "0.628306...
0.69446
3
Returns the rank of the current process.
def rank() -> int: return dist.get_rank() if dist.is_initialized() else 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rank(self):\n return self.lib.calculate_rank()", "def rank(self):\n if self._rank is None:\n self._rank = self.prufer_rank()\n return self._rank", "def get_rank(self):\n return int(self._rank)", "def get_rank(self):\n return self.__rank", "def get_rank(self...
[ "0.80822533", "0.80002564", "0.7949877", "0.79325294", "0.79249567", "0.7893032", "0.78871316", "0.7856118", "0.7848769", "0.7848769", "0.7848769", "0.7848769", "0.7848769", "0.7839193", "0.77974236", "0.7764616", "0.7760686", "0.76716596", "0.76598537", "0.7500097", "0.74324...
0.7283091
26
Returns the current world size (number of distributed processes).
def world_size() -> int: return dist.get_world_size() if dist.is_initialized() else 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_world_size() -> int:\n return collective.get_world_size()", "def get_world_size():\n if not torch.distributed.is_available():\n return 1\n if not torch.distributed.is_initialized():\n return 1\n return torch.distributed.get_world_size()", "def size():\n return int(os.enviro...
[ "0.86169875", "0.8585714", "0.80531013", "0.78157955", "0.7763954", "0.72711486", "0.7219287", "0.69150424", "0.68911326", "0.68068975", "0.671325", "0.668719", "0.6650058", "0.65553606", "0.6534457", "0.6533206", "0.64964646", "0.6494468", "0.6457273", "0.6454149", "0.644345...
0.86466646
0
Gathers this tensor from all processes. Supports backprop.
def gather(input: torch.Tensor) -> Tuple[torch.Tensor]: return GatherLayer.apply(input)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, sess):\n\n sess.run(self.sync) # copy weights from shared to local\n rollout = self.pull_batch_from_queue()\n batch = process_rollout(rollout, gamma=0.99, lambda_=1.0)\n\n should_compute_summary = self.task == 0 and self.local_steps % 11 == 0\n\n if should_comp...
[ "0.60234517", "0.59920496", "0.59397244", "0.59397244", "0.5855692", "0.5820956", "0.5770425", "0.57674265", "0.57518774", "0.56285346", "0.56285346", "0.56052834", "0.55982333", "0.5576598", "0.55694216", "0.5524458", "0.55227476", "0.5519531", "0.55187434", "0.54854375", "0...
0.0
-1
Returns an (n, n world_size) zero matrix with the diagonal for the rank of this process set to 1. Example output where n=3, the current process has rank 1, and there are
def eye_rank(n: int, device: Optional[torch.device] = None) -> torch.Tensor: rows = torch.arange(n, device=device, dtype=torch.long) cols = rows + rank() * n diag_mask = torch.zeros((n, n * world_size()), dtype=torch.bool) diag_mask[(rows, cols)] = True return diag_mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_game(n):\n matrix = []\n\n for i in range(n):\n matrix.append([0] * n)\n return matrix", "def make_blank_world():\n\t\tblank_array = [[Blank() for j in range(y_size + 1)] for i in range(x_size + 1)]\n\t\treturn blank_array", "def start_points(n, world):\n world[0, 0] ...
[ "0.61408174", "0.5977178", "0.58118933", "0.56802106", "0.56438154", "0.5553707", "0.554472", "0.5500732", "0.54714006", "0.54591256", "0.5432492", "0.54102844", "0.5402189", "0.5370259", "0.53477335", "0.5346161", "0.5288265", "0.5276366", "0.52738804", "0.52572614", "0.5255...
0.57331735
3
Decorator that only runs the function on the process with rank 0.
def rank_zero_only(fn): def wrapped(*args, **kwargs): if rank() == 0: return fn(*args, **kwargs) return wrapped
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rank_zero_only(fn):\n\n @wraps(fn)\n def wrapped_fn(self, *args, **kwargs):\n if self.rank == 0:\n fn(self, *args, **kwargs)\n\n return wrapped_fn", "def _message_when_root(func):\n\n def decorated(*args, **kwargs):\n from armi import MPI_RANK\n\n if MPI_RANK == 0:...
[ "0.78528255", "0.66583896", "0.6142544", "0.6070626", "0.6058951", "0.60186154", "0.5988285", "0.5949869", "0.5930045", "0.5898354", "0.5718907", "0.5713488", "0.5649635", "0.55312955", "0.5502004", "0.5497844", "0.547936", "0.5471152", "0.539442", "0.5339954", "0.53384835", ...
0.78286487
1
Equivalent to print, but only runs on the process with rank 0.
def print_rank_zero(*args, **kwargs) -> None: print(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def l_print_no_barrier(*args):\n print(comm.rank, ':', end=' ')\n for i in args:\n print(i, end=' ')\n # noinspection PyArgumentList\n print()", "def print_on_master(self, msg: str, process_group: ProcessGroup = None):\n rank = dist.get_rank(group=process_group)\n if rank == 0:\n...
[ "0.67231727", "0.671745", "0.6413891", "0.63975346", "0.6320614", "0.6140222", "0.59592324", "0.5801671", "0.5788699", "0.5557892", "0.54468316", "0.53032565", "0.5294671", "0.5267618", "0.5237459", "0.52162343", "0.51678485", "0.5136221", "0.5076834", "0.5038921", "0.5038565...
0.7382136
0
Returns a list of the methods the free energy is to be estimated with.
def getMethods(string): all_methods = ['TI','TI-CUBIC','DEXP','IEXP','GINS','GDEL','BAR','UBAR','RBAR','MBAR'] methods = ['TI','TI-CUBIC','DEXP','IEXP','BAR','MBAR'] if (numpy.array(['Sire']) == P.software.title()).any(): methods = ['TI','TI-CUBIC'] if not string: return methods def addRemove(string): operation = string[0] string = string[1:]+'+' method = '' for c in string: if c.isalnum(): method += c elif c=='_': method += '-' elif (c=='-' or c=='+'): if method in all_methods: if operation=='-': if method in methods: methods.remove(method) else: if not method in methods: methods.append(method) method = '' operation = c else: parser.error("\nThere is no '%s' in the list of supported methods." % method) else: parser.error("\nUnknown character '%s' in the method string is found." % c) return if string=='ALL': methods = all_methods else: primo = string[0] if primo.isalpha(): methods = string.replace('+', ' ').replace('_', '-').split() methods = [m for m in methods if m in all_methods] elif primo=='+' or primo=='-': addRemove(string) else: parser.error("\nUnknown character '%s' in the method string is found." % primo) return methods
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def available_methods():\n return {mc.method_id: mc for mc in MethodFactory.method_classes}", "def list_methods(self):\n return list(self.methods.keys())", "def get_available_shipping_methods(self):\n return [\n m for m\n in ShippingMethod.objects.available(shop=self....
[ "0.62939", "0.62060606", "0.6165162", "0.6080959", "0.6052875", "0.5965192", "0.5954748", "0.58911914", "0.58881974", "0.586676", "0.584898", "0.5769478", "0.5767808", "0.5711987", "0.5688772", "0.5688331", "0.5685699", "0.5681339", "0.567954", "0.5675728", "0.56619275", "0...
0.5259336
45
Identifies uncorrelated samples and updates the arrays of the reduced potential energy and dhdlt retaining data entries of these samples only. 'sta' and 'fin' are the starting and final snapshot positions to be read, both are arrays of dimension K.
def uncorrelate(sta, fin, do_dhdl=False): if not P.uncorr_threshold: if P.software.title()=='Sire': return dhdlt, nsnapshots, None return dhdlt, nsnapshots, u_klt u_kln = numpy.zeros([K,K,max(fin-sta)], numpy.float64) # u_kln[k,m,n] is the reduced potential energy of uncorrelated sample index n from state k evaluated at state m N_k = numpy.zeros(K, int) # N_k[k] is the number of uncorrelated samples from state k g = numpy.zeros(K,float) # autocorrelation times for the data if do_dhdl: dhdl = numpy.zeros([K,n_components,max(fin-sta)], float) #dhdl is value for dhdl for each component in the file at each time. print "\n\nNumber of correlated and uncorrelated samples:\n\n%6s %12s %12s %12s\n" % ('State', 'N', 'N_k', 'N/N_k') UNCORR_OBSERVABLE = {'Gromacs':P.uncorr,'Amber':'dhdl', 'Sire':'dhdl', 'Desmond':'dE', 'Gomc':P.uncorr}[P.software.title()] if UNCORR_OBSERVABLE == 'dhdl': # Uncorrelate based on dhdl values at a given lambda. for k in range(K): # Sum up over those energy components that are changing. # if there are repeats, we need to use the lchange[k] from the last repeated state. lastl = k for l in range(K): if numpy.array_equal(lv[k],lv[l]): lastl = l dhdl_sum = numpy.sum(dhdlt[k, lchange[lastl], sta[k]:fin[k]], axis=0) # Determine indices of uncorrelated samples from potential autocorrelation analysis at state k #NML: Set statistical inefficiency (g) = 1 if vector is all 0 if not numpy.any(dhdl_sum): #print "WARNING: Found all zeros for Lambda={}\n Setting statistical inefficiency g=1.".format(k) g[k] = 1 else: # (alternatively, could use the energy differences -- here, we will use total dhdl). g[k] = pymbar.timeseries.statisticalInefficiency(dhdl_sum) indices = sta[k] + numpy.array(pymbar.timeseries.subsampleCorrelatedData(dhdl_sum, g=g[k])) # indices of uncorrelated samples N_uncorr = len(indices) # number of uncorrelated samples # Handle case where we end up with too few. 
if N_uncorr < P.uncorr_threshold: if do_dhdl: print "WARNING: Only %s uncorrelated samples found at lambda number %s; proceeding with analysis using correlated samples..." % (N_uncorr, k) indices = sta[k] + numpy.arange(len(dhdl_sum)) N = len(indices) else: N = N_uncorr N_k[k] = N # Store the number of uncorrelated samples from state k. if not (u_klt is None): for l in range(K): u_kln[k,l,0:N] = u_klt[k,l,indices] if do_dhdl: print "%6s %12s %12s %12.2f" % (k, N_uncorr, N_k[k], g[k]) for n in range(n_components): dhdl[k,n,0:N] = dhdlt[k,n,indices] if UNCORR_OBSERVABLE == 'dhdl_all': # Uncorrelate based on dhdl values at a given lambda. for k in range(K): # Sum up over the energy components; notice, that only the relevant data is being used in the third dimension. dhdl_sum = numpy.sum(dhdlt[k,:,sta[k]:fin[k]], axis=0) # Determine indices of uncorrelated samples from potential autocorrelation analysis at state k # (alternatively, could use the energy differences -- here, we will use total dhdl). g[k] = pymbar.timeseries.statisticalInefficiency(dhdl_sum) indices = sta[k] + numpy.array(pymbar.timeseries.subsampleCorrelatedData(dhdl_sum, g=g[k])) # indices of uncorrelated samples N = len(indices) # number of uncorrelated samples # Handle case where we end up with too few. if N < P.uncorr_threshold: if do_dhdl: print "WARNING: Only %s uncorrelated samples found at lambda number %s; proceeding with analysis using correlated samples..." % (N, k) indices = sta[k] + numpy.arange(len(dhdl_sum)) N = len(indices) N_k[k] = N # Store the number of uncorrelated samples from state k. if not (u_klt is None): for l in range(K): u_kln[k,l,0:N] = u_klt[k,l,indices] if do_dhdl: print "%6s %12s %12s %12.2f" % (k, fin[k], N_k[k], g[k]) for n in range(n_components): dhdl[k,n,0:N] = dhdlt[k,n,indices] if UNCORR_OBSERVABLE == 'dE': # Uncorrelate based on energy differences between lambdas. 
for k in range(K): # Sum up over the energy components as above using only the relevant data; here we use energy differences # Determine indices of uncorrelated samples from potential autocorrelation analysis at state k dE = u_klt[k,k+1,sta[k]:fin[k]] if not k==K-1 else u_klt[k,k-1,sta[k]:fin[k]] g[k] = pymbar.timeseries.statisticalInefficiency(dE) indices = sta[k] + numpy.array(pymbar.timeseries.subsampleCorrelatedData(dE, g=g[k])) # indices of uncorrelated samples N = len(indices) # number of uncorrelated samples # Handle case where we end up with too few. if N < P.uncorr_threshold: print "WARNING: Only %s uncorrelated samples found at lambda number %s; proceeding with analysis using correlated samples..." % (N, k) indices = sta[k] + numpy.arange(len(dE)) N = len(indices) N_k[k] = N # Store the number of uncorrelated samples from state k. if not (u_klt is None): for l in range(K): u_kln[k,l,0:N] = u_klt[k,l,indices] if do_dhdl: return (dhdl, N_k, u_kln) return (N_k, u_kln)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data_stoichometry(fasta, bams, regions, features, samples, fracs, \n maxReads=1000, strands=\"+-\", nn=1):\n # get storage\n k = 2*nn+1\n fi = 0\n sam = pysam.AlignmentFile(bams[0])\n region2data = {}\n sample2idx = {s: i for i, s in enumerate(samples)}; print(s...
[ "0.5908855", "0.54569936", "0.5322768", "0.5313147", "0.5301625", "0.53015983", "0.5290556", "0.52887326", "0.528543", "0.5255901", "0.52539086", "0.52324796", "0.52108496", "0.52033883", "0.5170586", "0.5168382", "0.5167442", "0.51477975", "0.5144637", "0.5129123", "0.511288...
0.68044555
0
Computes the MBAR free energy given the reduced potential and the number of relevant entries in it.
def estimatewithMBAR(u_kln, N_k, reltol, regular_estimate=False):
    """Computes the MBAR free energy from the reduced potential matrix.

    Parameters
    ----------
    u_kln : reduced potential energies; u_kln[k, l, n] is sample n from
        state k evaluated at state l.
    N_k : number of uncorrelated samples per state (only the first N_k[k]
        entries along the last axis of u_kln are meaningful).
    reltol : relative tolerance passed to the MBAR solver.
    regular_estimate : when True, print diagnostics, optionally plot the
        overlap matrix, and return the full (Deltaf_ij, dDeltaf_ij)
        matrices in reduced units; when False, return only the end-to-end
        free energy difference converted via P.beta_report.

    NOTE(review): relies on module-level globals K (number of states),
    P (options object), pymbar, numpy, pl/os — confirm against caller.
    """

    def plotOverlapMatrix(O):
        """Plots the probability of observing a sample from state i (row)
        in state j (column). For convenience, the neighboring state cells
        are fringed in bold."""
        max_prob = O.max()
        fig = pl.figure(figsize=(K/2., K/2.))
        fig.add_subplot(111, frameon=False, xticks=[], yticks=[])

        # One grey-scale cell per (i, j) pair; darkness encodes overlap.
        for i in range(K):
            if i != 0:
                pl.axvline(x=i, ls='-', lw=0.5, color='k', alpha=0.25)
                pl.axhline(y=i, ls='-', lw=0.5, color='k', alpha=0.25)
            for j in range(K):
                if O[j, i] < 0.005:
                    ii = ''
                elif O[j, i] > 0.995:
                    ii = '1.00'
                else:
                    ii = ("%.2f" % O[j, i])[1:]  # drop leading "0"
                alf = O[j, i]/max_prob
                pl.fill_between([i, i+1], [K-j, K-j], [K-(j+1), K-(j+1)], color='k', alpha=alf)
                pl.annotate(ii, xy=(i, j), xytext=(i+0.5, K-(j+0.5)), size=8, textcoords='data', va='center', ha='center', color=('k' if alf < 0.5 else 'w'))

        # Axis labels: original lambda indices (accounting for skipped ones).
        if P.bSkipLambdaIndex:
            ks = [int(l) for l in P.bSkipLambdaIndex.split('-')]
            ks = numpy.delete(numpy.arange(K+len(ks)), ks)
        else:
            ks = range(K)
        for i in range(K):
            pl.annotate(ks[i], xy=(i+0.5, 1), xytext=(i+0.5, K+0.5), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k')
            pl.annotate(ks[i], xy=(-0.5, K-(j+0.5)), xytext=(-0.5, K-(i+0.5)), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k')
        pl.annotate('$\lambda$', xy=(-0.5, K-(j+0.5)), xytext=(-0.5, K+0.5), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k')

        # Outer frame.
        pl.plot([0, K], [0, 0], 'k-', lw=4.0, solid_capstyle='butt')
        pl.plot([K, K], [0, K], 'k-', lw=4.0, solid_capstyle='butt')
        pl.plot([0, 0], [0, K], 'k-', lw=2.0, solid_capstyle='butt')
        pl.plot([0, K], [K, K], 'k-', lw=2.0, solid_capstyle='butt')

        # Bold staircase fringing the diagonal/neighbor cells.
        cx = sorted(2*range(K+1))
        cy = sorted(2*range(K+1), reverse=True)
        pl.plot(cx[2:-1], cy[1:-2], 'k-', lw=2.0)
        pl.plot(numpy.array(cx[2:-3])+1, cy[1:-4], 'k-', lw=2.0)
        pl.plot(cx[1:-2], numpy.array(cy[:-3])-1, 'k-', lw=2.0)
        pl.plot(cx[1:-4], numpy.array(cy[:-5])-2, 'k-', lw=2.0)

        pl.xlim(-1, K)
        pl.ylim(0, K+1)
        pl.savefig(os.path.join(P.output_directory, 'O_MBAR.pdf'), bbox_inches='tight', pad_inches=0.0)
        pl.close(fig)
        return

    if regular_estimate:
        print "\nEstimating the free energy change with MBAR..."
    MBAR = pymbar.mbar.MBAR(u_kln, N_k, verbose = P.verbose, relative_tolerance = reltol, initialize = P.init_with)
    # Get matrix of dimensionless free energy differences and uncertainty estimate.
    (Deltaf_ij, dDeltaf_ij, theta_ij ) = MBAR.getFreeEnergyDifferences(uncertainty_method='svd-ew', return_theta = True)
    if P.verbose:
        print "Matrix of free energy differences\nDeltaf_ij:\n%s\ndDeltaf_ij:\n%s" % (Deltaf_ij, dDeltaf_ij)

    if regular_estimate:
        if P.overlap:
            # Print the overlap matrix and render it as a figure.
            print "The overlap matrix is..."
            O = MBAR.computeOverlap()[2]
            for k in range(K):
                line = ''
                for l in range(K):
                    line += ' %5.2f ' % O[k, l]
                print line
            plotOverlapMatrix(O)
            print "\nFor a nicer figure look at 'O_MBAR.pdf'"
        return (Deltaf_ij, dDeltaf_ij)
    # Otherwise report only the total free energy change in the user's units.
    return (Deltaf_ij[0,K-1]/P.beta_report, dDeltaf_ij[0,K-1]/P.beta_report)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bethe_free_energy(self, potential):\n xn, xe, lpn, lpe, alpha = self(None, full_out=False)\n fn, fe = potential((xn, xe))\n bfe = -(tf.reduce_sum((fn + self.tw * lpn) * self.wn * alpha, [2, 3, 4]) +\n tf.reduce_sum((fe - lpe) * self.we * alpha, [2, 3, 4]))\n return bf...
[ "0.6321866", "0.6285089", "0.62403625", "0.6177378", "0.61164016", "0.5873943", "0.58185613", "0.58177686", "0.5722714", "0.56651354", "0.56518394", "0.56394243", "0.55663705", "0.55481577", "0.55286837", "0.5522224", "0.54651517", "0.5449803", "0.5437534", "0.54333156", "0.5...
0.0
-1
Plots the probability of observing a sample from state i (row) in state j (column). For convenience, the neighboring state cells are fringed in bold.
def plotOverlapMatrix(O): max_prob = O.max() fig = pl.figure(figsize=(K/2.,K/2.)) fig.add_subplot(111, frameon=False, xticks=[], yticks=[]) for i in range(K): if i!=0: pl.axvline(x=i, ls='-', lw=0.5, color='k', alpha=0.25) pl.axhline(y=i, ls='-', lw=0.5, color='k', alpha=0.25) for j in range(K): if O[j,i] < 0.005: ii = '' elif O[j,i] > 0.995: ii = '1.00' else: ii = ("%.2f" % O[j,i])[1:] alf = O[j,i]/max_prob pl.fill_between([i,i+1], [K-j,K-j], [K-(j+1),K-(j+1)], color='k', alpha=alf) pl.annotate(ii, xy=(i,j), xytext=(i+0.5,K-(j+0.5)), size=8, textcoords='data', va='center', ha='center', color=('k' if alf < 0.5 else 'w')) if P.bSkipLambdaIndex: ks = [int(l) for l in P.bSkipLambdaIndex.split('-')] ks = numpy.delete(numpy.arange(K+len(ks)), ks) else: ks = range(K) for i in range(K): pl.annotate(ks[i], xy=(i+0.5, 1), xytext=(i+0.5, K+0.5), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k') pl.annotate(ks[i], xy=(-0.5, K-(j+0.5)), xytext=(-0.5, K-(i+0.5)), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k') pl.annotate('$\lambda$', xy=(-0.5, K-(j+0.5)), xytext=(-0.5, K+0.5), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k') pl.plot([0,K], [0,0], 'k-', lw=4.0, solid_capstyle='butt') pl.plot([K,K], [0,K], 'k-', lw=4.0, solid_capstyle='butt') pl.plot([0,0], [0,K], 'k-', lw=2.0, solid_capstyle='butt') pl.plot([0,K], [K,K], 'k-', lw=2.0, solid_capstyle='butt') cx = sorted(2*range(K+1)) cy = sorted(2*range(K+1), reverse=True) pl.plot(cx[2:-1], cy[1:-2], 'k-', lw=2.0) pl.plot(numpy.array(cx[2:-3])+1, cy[1:-4], 'k-', lw=2.0) pl.plot(cx[1:-2], numpy.array(cy[:-3])-1, 'k-', lw=2.0) pl.plot(cx[1:-4], numpy.array(cy[:-5])-2, 'k-', lw=2.0) pl.xlim(-1, K) pl.ylim(0, K+1) pl.savefig(os.path.join(P.output_directory, 'O_MBAR.pdf'), bbox_inches='tight', pad_inches=0.0) pl.close(fig) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __draw(self, state:dict):\n _, ax = plt.subplots()\n ax.set_axis_off()\n tb = Table(ax, bbox=[0,0,1,1])\n\n width = height = 1.0 /9 \n\n\n for key in self.state.keys():\n # Add cells\n i,j = self.__display_table_map[key]\n tb.add_cell(i, j, wi...
[ "0.62075657", "0.6059904", "0.60067856", "0.5875241", "0.58613783", "0.5835459", "0.57914424", "0.5699644", "0.5671611", "0.56513095", "0.5606568", "0.5569045", "0.5562588", "0.55362916", "0.5512227", "0.550696", "0.54794586", "0.5475376", "0.54668474", "0.54337996", "0.54149...
0.0
-1
Fills out the results table linewise.
def printLine(str1, str2, d1=None, d2=None): print str1, text = str1 for name in P.methods: if d1 == 'plain': print str2, text += ' ' + str2 if d1 == 'name': print str2 % (name, P.units), text += ' ' + str2 % (name, P.units) if d1 and d2: print str2 % (d1[name]/P.beta_report, d2[name]/P.beta_report), text += ' ' + str2 % (d1[name]/P.beta_report, d2[name]/P.beta_report) print '' outtext.append(text + '\n') return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_table(self):\n self.col_widths = []\n self.result = \"\"", "def generate_table(self, rows):\n ...", "def fill_table(self, executer, tree, cursor, table):\n counter = 0\n table_content = executer.lots_of_eggs(cursor, table)\n for line in table_content:\n ...
[ "0.65478563", "0.6272289", "0.62374914", "0.6087662", "0.5902959", "0.58083314", "0.57289517", "0.5723789", "0.5701637", "0.56659144", "0.56502664", "0.563632", "0.5613999", "0.5606675", "0.55917966", "0.5551868", "0.5539367", "0.55313087", "0.5485055", "0.5482998", "0.547419...
0.0
-1
Plots the free energy change computed using the equilibrated snapshots between the proper target time frames (f_ts and r_ts) in both forward (data points are stored in F_df and F_ddf) and reverse (data points are stored in R_df and R_ddf) directions.
def plotdFvsTime(f_ts, r_ts, F_df, R_df, F_ddf, R_ddf): fig = pl.figure(figsize=(8,6)) ax = fig.add_subplot(111) pl.setp(ax.spines['bottom'], color='#D2B9D3', lw=3, zorder=-2) pl.setp(ax.spines['left'], color='#D2B9D3', lw=3, zorder=-2) for dire in ['top', 'right']: ax.spines[dire].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') max_fts = max(f_ts) rr_ts = [aa/max_fts for aa in f_ts[::-1]] f_ts = [aa/max_fts for aa in f_ts] r_ts = [aa/max_fts for aa in r_ts] line0 = pl.fill_between([r_ts[0], f_ts[-1]], R_df[0]-R_ddf[0], R_df[0]+R_ddf[0], color='#D2B9D3', zorder=-5) for i in range(len(f_ts)): line1 = pl.plot([f_ts[i]]*2, [F_df[i]-F_ddf[i], F_df[i]+F_ddf[i]], color='#736AFF', ls='-', lw=3, solid_capstyle='round', zorder=1) line11 = pl.plot(f_ts, F_df, color='#736AFF', ls='-', lw=3, marker='o', mfc='w', mew=2.5, mec='#736AFF', ms=12, zorder=2) for i in range(len(rr_ts)): line2 = pl.plot([rr_ts[i]]*2, [R_df[i]-R_ddf[i], R_df[i]+R_ddf[i]], color='#C11B17', ls='-', lw=3, solid_capstyle='round', zorder=3) line22 = pl.plot(rr_ts, R_df, color='#C11B17', ls='-', lw=3, marker='o', mfc='w', mew=2.5, mec='#C11B17', ms=12, zorder=4) pl.xlim(r_ts[0], f_ts[-1]) pl.xticks(r_ts[::2] + f_ts[-1:], fontsize=10) pl.yticks(fontsize=10) leg = pl.legend((line1[0], line2[0]), (r'$Forward$', r'$Reverse$'), loc=1, prop=FP(size=18), frameon=False) pl.xlabel(r'$\mathrm{Fraction\/of\/the\/simulation\/step}$', fontsize=16, color='#151B54') pl.ylabel(r'$\mathrm{\Delta G\/%s}$' % P.units, fontsize=16, color='#151B54') pl.xticks(f_ts, ['%.2f' % i for i in f_ts]) pl.tick_params(axis='x', color='#D2B9D3') pl.tick_params(axis='y', color='#D2B9D3') pl.savefig(os.path.join(P.output_directory, 'dF_t.pdf')) pl.close(fig) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')...
[ "0.6313317", "0.6141398", "0.6133596", "0.61213934", "0.58762604", "0.5865189", "0.5831045", "0.5801431", "0.57747483", "0.5743009", "0.57139415", "0.56994927", "0.56850445", "0.5679581", "0.5657056", "0.5632359", "0.56177044", "0.5578176", "0.5577838", "0.55500615", "0.55456...
0.65924156
0
Plots the free energy differences evaluated for each pair of adjacent states for all methods.
def plotdFvsLambda1(): x = numpy.arange(len(df_allk)) if x[-1]<8: fig = pl.figure(figsize = (8,6)) else: fig = pl.figure(figsize = (len(x),6)) width = 1./(len(P.methods)+1) elw = 30*width colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'} lines = tuple() for name in P.methods: y = [df_allk[i][name]/P.beta_report for i in x] ye = [ddf_allk[i][name]/P.beta_report for i in x] line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.1*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw)) lines += (line[0],) pl.xlabel('States', fontsize=12, color='#151B54') pl.ylabel('$\Delta G$ '+P.units, fontsize=12, color='#151B54') pl.xticks(x+0.5*width*len(P.methods), tuple(['%d--%d' % (i, i+1) for i in x]), fontsize=8) pl.yticks(fontsize=8) pl.xlim(x[0], x[-1]+len(lines)*width) ax = pl.gca() for dir in ['right', 'top', 'bottom']: ax.spines[dir].set_color('none') ax.yaxis.set_ticks_position('left') for tick in ax.get_xticklines(): tick.set_visible(False) leg = pl.legend(lines, tuple(P.methods), loc=3, ncol=2, prop=FP(size=10), fancybox=True) leg.get_frame().set_alpha(0.5) pl.title('The free energy change breakdown', fontsize = 12) pl.savefig(os.path.join(P.output_directory, 'dF_state_long.pdf'), bbox_inches='tight') pl.close(fig) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vis_difference(self):\n print(self.init_vec)\n\n init = self.init_output.numpy()\n\n alphas = np.linspace(0, 1, 20)\n for i, alpha in enumerate(alphas):\n\n display.clear_output(wait=True)\n norm = [torch.linalg.norm(torch.tensor(\n self.init_vec...
[ "0.6425327", "0.6316091", "0.6314955", "0.60346085", "0.5971509", "0.59265935", "0.585417", "0.58146745", "0.5805727", "0.5739992", "0.5732385", "0.57270944", "0.57151026", "0.560639", "0.5592231", "0.5590342", "0.55784935", "0.5556301", "0.55454224", "0.55395085", "0.5513448...
0.5443678
29
Plots the free energy differences evaluated for each pair of adjacent states for all methods. The layout is approximately 'nb' bars per subplot.
def plotdFvsLambda2(nb=10): x = numpy.arange(len(df_allk)) if len(x) < nb: return xs = numpy.array_split(x, len(x)/nb+1) mnb = max([len(i) for i in xs]) fig = pl.figure(figsize = (8,6)) width = 1./(len(P.methods)+1) elw = 30*width colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'} ndx = 1 for x in xs: lines = tuple() ax = pl.subplot(len(xs), 1, ndx) for name in P.methods: y = [df_allk[i][name]/P.beta_report for i in x] ye = [ddf_allk[i][name]/P.beta_report for i in x] line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.05*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw)) lines += (line[0],) for dir in ['left', 'right', 'top', 'bottom']: if dir == 'left': ax.yaxis.set_ticks_position(dir) else: ax.spines[dir].set_color('none') pl.yticks(fontsize=10) ax.xaxis.set_ticks([]) for i in x+0.5*width*len(P.methods): ax.annotate('$\mathrm{%d-%d}$' % (i, i+1), xy=(i, 0), xycoords=('data', 'axes fraction'), xytext=(0, -2), size=10, textcoords='offset points', va='top', ha='center') pl.xlim(x[0], x[-1]+len(lines)*width + (mnb - len(x))) ndx += 1 leg = ax.legend(lines, tuple(P.methods), loc=0, ncol=2, prop=FP(size=8), title='$\mathrm{\Delta G\/%s\/}\mathit{vs.}\/\mathrm{lambda\/pair}$' % P.units, fancybox=True) leg.get_frame().set_alpha(0.5) pl.savefig(os.path.join(P.output_directory, 'dF_state.pdf'), bbox_inches='tight') pl.close(fig) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def results_plot_fuel_reactor(self):\n \n import matplotlib.pyplot as plt \n\n # Total pressure profile\n P = []\n for z in self.MB_fuel.z:\n P.append(value(self.MB_fuel.P[z]))\n fig_P = plt.figure(1)\n plt.plot(self.MB_fuel.z, P)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n...
[ "0.6250733", "0.6131744", "0.61037934", "0.6092076", "0.60732573", "0.59801644", "0.59770113", "0.5963969", "0.59115875", "0.58646387", "0.5833313", "0.58220226", "0.5787901", "0.57865995", "0.5741407", "0.5739193", "0.57294893", "0.57286334", "0.5718547", "0.57135636", "0.56...
0.62457854
1
Plots the ave_dhdl array as a function of the lambda value. If (TI and TICUBIC in methods) plots the TI integration area and the TICUBIC interpolation curve, elif (only one of them in methods) plots the integration area of the method.
def plotTI():
    """Plots <dH/dl> as a function of lambda for each energy component.

    If both 'TI' and 'TI-CUBIC' are among P.methods, draws the TI
    integration area plus the TI-CUBIC interpolation curve; if only one
    of them is present, draws that method's integration area. Writes
    'dhdl_TI.pdf'.

    NOTE(review): depends on module-level dlam, ave_dhdl, lchange, lv,
    cubspl, n_components, K, P, numpy, pl, FP, os.
    """
    # Subdivide the lambda interval so the TI discretization is visible.
    min_dl = dlam[dlam != 0].min()
    S = int(0.4/min_dl)
    fig = pl.figure(figsize = (8,6))
    ax = fig.add_subplot(1,1,1)
    ax.spines['bottom'].set_position('zero')
    ax.spines['top'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    for k, spine in ax.spines.items():
        spine.set_zorder(12.2)
    xs, ndx, dx = [0], 0, 0.001
    colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']
    min_y, max_y = 0, 0
    lines = tuple()
    ## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper
    lv_names2 = []
    # Legend entries only for components that actually change.
    for j in range(n_components):
        y = ave_dhdl[:,j]
        if not (y == 0).all():
            lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())
    for j in range(n_components):
        y = ave_dhdl[:,j]
        if not (y == 0).all():
            # Get the coordinates.
            lj = lchange[:,j]
            x = lv[:,j][lj]
            y = y[lj]/P.beta_report
            if 'TI' in P.methods:
                # Plot the TI integration area (alternating alpha per segment).
                ss = 'TI'
                for i in range(len(x)-1):
                    min_y = min(y.min(), min_y)
                    max_y = max(y.max(), max_y)
                    #pl.plot(x,y)
                    if i%2==0:
                        pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)
                    else:
                        pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)
                # Off-screen points used only to populate the legend.
                xlegend = [-100*wnum for wnum in range(len(lv_names2))]
                pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper
            if 'TI-CUBIC' in P.methods and not cubspl[j]==0:
                # Plot the TI-CUBIC interpolation curve.
                ss += ' and TI-CUBIC'
                xnew = numpy.arange(0, 1+dx, dx)
                ynew = cubspl[j].interpolate(y, xnew)
                min_y = min(ynew.min(), min_y)
                max_y = max(ynew.max(), max_y)
                pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)
            else:
                # Plot the TI-CUBIC integration area.
                ss = 'TI-CUBIC'
                for i in range(len(x)-1):
                    xnew = numpy.arange(x[i], x[i+1]+dx, dx)
                    ynew = cubspl[j].interpolate(y, xnew)
                    ynew[0], ynew[-1] = y[i], y[i+1]
                    min_y = min(ynew.min(), min_y)
                    max_y = max(ynew.max(), max_y)
                    if i%2==0:
                        pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)
                    else:
                        pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)
            # Store the abscissa values and update the subplot index.
            xs += (x+ndx).tolist()[1:]
            ndx += 1
    # Make sure the tick labels are not overcrowded.
    xs = numpy.array(xs)
    dl_mat = numpy.array([xs-i for i in xs])
    ri = range(len(xs))
    def getInd(r=ri, z=[0]):
        # Recursively pick tick indices that are at least min_dl apart.
        # NOTE(review): the mutable default z=[0] accumulates state across
        # calls if getInd() is ever invoked twice — works here because it
        # is called once per plotTI() invocation; confirm before reuse.
        primo = r[0]
        min_dl=ndx*0.02*2**(primo>10)
        if dl_mat[primo].max()<min_dl:
            return z
        for i in r:
            for j in range(len(xs)):
                if dl_mat[i,j]>min_dl:
                    z.append(j)
                    return getInd(ri[j:], z)
    xt = [i if (i in getInd()) else '' for i in range(K)]
    pl.xticks(xs[1:], xt[1:], fontsize=10)
    pl.yticks(fontsize=10)
    #ax = pl.gca()
    #for label in ax.get_xticklabels():
    #    label.set_bbox(dict(fc='w', ec='None', alpha=0.5))
    # Remove the abscissa ticks and set up the axes limits.
    for tick in ax.get_xticklines():
        tick.set_visible(False)
    pl.xlim(0, ndx)
    min_y *= 1.01
    max_y *= 1.01
    pl.ylim(min_y, max_y)
    # Vertical lambda-value annotations at the kept tick positions.
    for i,j in zip(xs[1:], xt[1:]):
        pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54')
    if ndx>1:
        # Add minor y-ticks when the major ticks are sparse.
        lenticks = len(ax.get_ymajorticklabels()) - 1
        if min_y<0: lenticks -= 1
        if lenticks < 5:
            from matplotlib.ticker import AutoMinorLocator as AML
            ax.yaxis.set_minor_locator(AML())
    pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)
    pl.ylabel(r'$\mathrm{\langle{\frac{ \partial U } { \partial \lambda }}\rangle_{\lambda}\/%s}$' % P.units, fontsize=20, color='#151B54')
    pl.annotate('$\mathit{\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')
    if not P.software.title()=='Sire':
        lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)
        for l in lege.legendHandles:
            l.set_linewidth(10)
    pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))
    pl.close(fig)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plotdFvsLambda1():\n x = numpy.arange(len(df_allk))\n if x[-1]<8:\n fig = pl.figure(figsize = (8,6))\n else:\n fig = pl.figure(figsize = (len(x),6))\n width = 1./(len(P.methods)+1)\n elw = 30*width\n colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431',...
[ "0.5631575", "0.56224716", "0.55810195", "0.5569204", "0.544854", "0.54324406", "0.5401097", "0.5397317", "0.53738916", "0.53561234", "0.5292456", "0.52792794", "0.52540356", "0.52267444", "0.5214065", "0.52021885", "0.5194347", "0.5180309", "0.5170628", "0.51551074", "0.5147...
0.5738737
0
A graphical representation of what Bennett calls 'CurveFitting Method'.
def plotCFM(u_kln, N_k, num_bins=100):
    """A graphical representation of what Bennett calls the
    'Curve-Fitting Method': for each adjacent state pair, histograms the
    forward/backward energy differences and plots the log-ratio curve
    against the BAR estimate band. Writes 'cfm.pdf'.

    Parameters
    ----------
    u_kln : reduced potential energies, indexed [k, l, n].
    N_k : number of samples per state.
    num_bins : number of histogram bins per state pair.

    NOTE(review): depends on module-level df_allk, ddf_allk, P,
    matplotlib, numpy, pl, os. Python 2 assumed (print statement,
    list-returning zip/range).
    """
    print "Plotting the CFM figure..."

    def leaveTicksOnlyOnThe(xdir, ydir, axis):
        # Keep ticks only on the requested sides of the axis box.
        dirs = ['left', 'right', 'top', 'bottom']
        axis.xaxis.set_ticks_position(xdir)
        axis.yaxis.set_ticks_position(ydir)
        return

    def plotdg_vs_dU(yy, df_allk, ddf_allk):
        # Lay the per-pair curves out on an (h x w) grid of subplots.
        sq = (len(yy))**0.5
        h = int(sq)
        w = h + 1 + 1*(sq-h>0.5)
        scale = round(w/3., 1)+0.4 if len(yy)>13 else 1
        sf = numpy.ceil(scale*3) if scale>1 else 0
        fig = pl.figure(figsize = (8*scale,6*scale))
        matplotlib.rc('axes', facecolor = '#E3E4FA')
        matplotlib.rc('axes', edgecolor = 'white')
        # Original lambda indices, honoring any skipped states.
        if P.bSkipLambdaIndex:
            ks = [int(l) for l in P.bSkipLambdaIndex.split('-')]
            ks = numpy.delete(numpy.arange(K+len(ks)), ks)
        else:
            ks = range(K)
        for i, (xx_i, yy_i) in enumerate(yy):
            ax = pl.subplot(h, w, i+1)
            ax.plot(xx_i, yy_i, color='r', ls='-', lw=3, marker='o', mec='r')
            leaveTicksOnlyOnThe('bottom', 'left', ax)
            ax.locator_params(axis='x', nbins=5)
            ax.locator_params(axis='y', nbins=6)
            # Shaded band: the BAR estimate +/- its uncertainty.
            ax.fill_between(xx_i, df_allk[i]['BAR'] - ddf_allk[i]['BAR'], df_allk[i]['BAR'] + ddf_allk[i]['BAR'], color='#D2B9D3', zorder=-1)
            ax.annotate(r'$\mathrm{%d-%d}$' % (ks[i], ks[i+1]), xy=(0.5, 0.9), xycoords=('axes fraction', 'axes fraction'), xytext=(0, -2), size=14, textcoords='offset points', va='top', ha='center', color='#151B54', bbox = dict(fc='w', ec='none', boxstyle='round', alpha=0.5))
            pl.xlim(xx_i.min(), xx_i.max())
        pl.annotate(r'$\mathrm{\Delta U_{i,i+1}\/(reduced\/units)}$', xy=(0.5, 0.03), xytext=(0.5, 0), xycoords=('figure fraction', 'figure fraction'), size=20+sf, textcoords='offset points', va='center', ha='center', color='#151B54')
        pl.annotate(r'$\mathrm{\Delta g_{i+1,i}\/(reduced\/units)}$', xy=(0.06, 0.5), xytext=(0, 0.5), rotation=90, xycoords=('figure fraction', 'figure fraction'), size=20+sf, textcoords='offset points', va='center', ha='center', color='#151B54')
        pl.savefig(os.path.join(P.output_directory, 'cfm.pdf'))
        pl.close(fig)
        return

    def findOptimalMinMax(ar):
        # Trim the range to where a coarse 10-bin histogram has real mass
        # (bins holding more than 1/8 of the samples).
        c = zip(*numpy.histogram(ar, bins=10))
        thr = int(ar.size/8.)
        mi, ma = ar.min(), ar.max()
        for (i,j) in c:
            if i>thr:
                mi = j
                break
        for (i,j) in c[::-1]:
            if i>thr:
                ma = j
                break
        return mi, ma

    def stripZeros(a, aa, b, bb):
        # Drop the leading/trailing empty bins of histogram `a` from both
        # histograms so their bin ranges stay aligned.
        z = numpy.array([a, aa[:-1], b, bb[:-1]])
        til = 0
        for i,j in enumerate(a):
            if j>0:
                til = i
                break
        z = z[:, til:]
        til = 0
        for i,j in enumerate(b[::-1]):
            if j>0:
                til = i
                break
        z = z[:, :len(a)+1-til]
        a, aa, b, bb = z
        return a, numpy.append(aa, 100), b, numpy.append(bb, 100)

    K = len(u_kln)
    yy = []
    for k in range(0, K-1):
        # Forward/backward energy differences for the (k, k+1) pair,
        # restricted to the common sample count.
        upto = min(N_k[k], N_k[k+1])
        righ = -u_kln[k,k+1, : upto]
        left = u_kln[k+1,k, : upto]
        min1, max1 = findOptimalMinMax(righ)
        min2, max2 = findOptimalMinMax(left)
        mi = min(min1, min2)
        ma = max(max1, max2)
        (counts_l, xbins_l) = numpy.histogram(left, bins=num_bins, range=(mi, ma))
        (counts_r, xbins_r) = numpy.histogram(righ, bins=num_bins, range=(mi, ma))
        counts_l, xbins_l, counts_r, xbins_r = stripZeros(counts_l, xbins_l, counts_r, xbins_r)
        counts_r, xbins_r, counts_l, xbins_l = stripZeros(counts_r, xbins_r, counts_l, xbins_l)
        # log of empty bins yields -inf/nan; suppressed deliberately.
        with numpy.errstate(divide='ignore', invalid='ignore'):
            log_left = numpy.log(counts_l) - 0.5*xbins_l[:-1]
            log_righ = numpy.log(counts_r) + 0.5*xbins_r[:-1]
            diff = log_left - log_righ
        yy.append((xbins_l[:-1], diff))
    plotdg_vs_dU(yy, df_allk, ddf_allk)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_fitted_curve(self):\n\t\tcxpX, cxpY = self.curveCXP.cxp_X, self.curveCXP.cxp_Y\n\t\tfittedX, fittedY = self.fitted_X, self.fitted_Y\n\t\tself.curveFig.plot(cxpX, cxpY, 'o', fittedX, fittedY)\n\t\tpass", "def plot_fitting_coefficients(self):\n from matplotlib import pyplot as plt\n coeff = ...
[ "0.7055758", "0.66392714", "0.65292656", "0.6458982", "0.62626904", "0.62592036", "0.61264443", "0.6093298", "0.6078136", "0.6074372", "0.60485107", "0.60373193", "0.6032992", "0.6027297", "0.5985532", "0.59401494", "0.59209955", "0.5901951", "0.5901479", "0.58885133", "0.585...
0.0
-1
Searches for winning sequence in columns.
def check_columns(self, win: list) -> bool: for row in range(self.size): column = [self.tags[x][row] for x in range(self.size)] for j in range(len(column) - len(win) + 1): if win == column[j:j+self.win_condition]: return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def col_win(board, player):\n for row in board.T:\n if check_row(row, player):\n return True\n return False", "def check_columns():\n global game_still_going\n # Check if any of the rows have all the same value.\n column1 = board[0] == board[3] == board[6] != '_'\n column2 = b...
[ "0.6576831", "0.6555307", "0.64840585", "0.6481743", "0.6447641", "0.63892657", "0.6347227", "0.63392514", "0.629483", "0.62593347", "0.6223977", "0.6137701", "0.60959685", "0.6069051", "0.60535437", "0.6050567", "0.60471857", "0.603175", "0.59898293", "0.5885481", "0.5832774...
0.6735366
0
Searches for winning sequence in rows.
def check_rows(self, win: list) -> bool: for row in self.tags: for j in range(len(row) - len(win) + 1): if win == row[j:j+self.win_condition]: return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search_next_win(self, player):\n for i, j, k in self.winning_cases:\n if self.game_board[i] == player and \\\n self.game_board[j] == player and \\\n self.game_board[k] == ' ':\n return k\n elif self.game_board[j] == player and \\\n ...
[ "0.6718347", "0.6526012", "0.6286749", "0.6253905", "0.62017727", "0.61693555", "0.6116269", "0.6089422", "0.6063677", "0.60391885", "0.6026189", "0.59956145", "0.59509", "0.5940875", "0.5902935", "0.58904356", "0.5852451", "0.58488286", "0.58410406", "0.5821735", "0.58202726...
0.65215236
2
Check for winning sequence in all possible diagonals that are at least as long as winning condition.
def check_diagonals(self, win: list) -> bool: for i in range(self.size - self.win_condition + 1): # [x x ] # [ x x ] # [ x x] # [ x] diagonal = [] x = i y = 0 for j in range(self.size - i): diagonal.append(self.tags[x][y]) x += 1 y += 1 for j in range(len(diagonal) - len(win) + 1): if win == diagonal[j:j + self.win_condition]: return True # [x ] # [x x ] # [ x x ] # [ x x] diagonal = [] x = 0 y = i for j in range(self.size - i): diagonal.append(self.tags[x][y]) x += 1 y += 1 for j in range(len(diagonal) - len(win) + 1): if win == diagonal[j:j + self.win_condition]: return True # [ x x] # [ x x ] # [x x ] # [x ] diagonal = [] x = self.size - 1 - i y = 0 for j in range(self.size - i): diagonal.append(self.tags[x][y]) x -= 1 y += 1 for j in range(len(diagonal) - len(win) + 1): if win == diagonal[j:j + self.win_condition]: return True # [ x] # [ x x] # [ x x ] # [x x ] diagonal = [] x = self.size - 1 y = 0 + i for j in range(self.size - i): diagonal.append(self.tags[x][y]) x -= 1 y += 1 for j in range(len(diagonal) - len(win) + 1): if win == diagonal[j:j + self.win_condition]: return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_winning(self, curr_state):\n rows = [[0,1,2], [3,4,5], [6,7,8]]\n columns = [[0,3,6], [1,4,7], [2,5,8]]\n diagonal = [[0,4,8], [2,4,6]]\n total_checks = rows + columns + diagonal\n for row in total_checks:\n sum = 0\n count = 0\n for pos in...
[ "0.69447654", "0.68922657", "0.6877511", "0.6859907", "0.6785897", "0.6660476", "0.66416174", "0.66363674", "0.6619749", "0.6578498", "0.65732425", "0.65712273", "0.653169", "0.6508", "0.6497785", "0.64893246", "0.64755654", "0.6452486", "0.6433075", "0.64247596", "0.64144456...
0.73726594
0
Checks if in any dimension the winning line is achieved.
def check_if_win(self, tag: str) -> bool: win_line = [tag]*self.win_condition return self.check_rows(win_line) or self.check_columns(win_line) or self.check_diagonals(win_line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isLineAt(self, x, y, dx, dy):\n\n initialValue = self.board[x][y]\n #checks a cell to see if there is a piece there\n if initialValue != 0:\n #loops though 3 times in a certain direction to see\n #if there is a winning configuration\n for i in range(3):\n ...
[ "0.7173354", "0.70985824", "0.6997996", "0.6947743", "0.68140584", "0.6719319", "0.6712507", "0.6712507", "0.6701225", "0.6684429", "0.6665015", "0.665667", "0.66564786", "0.662193", "0.6610499", "0.6598199", "0.65881664", "0.6571598", "0.6556325", "0.6543641", "0.6531366", ...
0.6457668
27
Checks whether the board is fully packed with figures — in practice, whether the tags array contains no empty cells.
def full_board(self) -> bool: counter = 0 for column in self.tags: if None in column: counter += 1 return counter == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _board_is_full(self):\n return (self.get_counts()[0] + self.get_counts()[1] == self._num_rows * self._num_cols)", "def is_full(board):\r\n return False", "def is_full(board):\n return False", "def check_grid_full(self):\n for row in self.game_state:\n for e in row:\n ...
[ "0.6820874", "0.676673", "0.6736874", "0.6717051", "0.6591668", "0.65669096", "0.6556075", "0.6527779", "0.6506026", "0.6469064", "0.64647114", "0.6442245", "0.6417079", "0.6390676", "0.63876194", "0.637983", "0.6371609", "0.6350671", "0.6347817", "0.6293271", "0.62686855", ...
0.74219203
0
Checks for empty spaces in the tags list. If the empty space is found it's coordinates are being packed into tuple and into new list.
def check_for_moves(self) -> list: avail_moves = [] for x in range(self.size): for y in range(self.size): if self.tags[x][y] is None: avail_moves.append((x, y)) return avail_moves
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _preprocess(self, tagged: List[Tuple]) -> Tuple:\n ori = \" \".join([tag[0] for tag in tagged])\n tags = [tag[1] for tag in tagged]\n # Mapping into general tagset\n tags = [self._map[tag] if tag in self._map else \"X\" for tag in tags]\n return \" \".join(tags), ori", "def...
[ "0.56815004", "0.5611002", "0.54052174", "0.5345678", "0.53289914", "0.5290784", "0.5282728", "0.52795637", "0.52427185", "0.5172312", "0.5162299", "0.5155179", "0.5121523", "0.5118857", "0.5089582", "0.50675386", "0.5064925", "0.506162", "0.50472486", "0.50319785", "0.500313...
0.56864506
0
This function ties together the class's functionality and performs the AI's move. It recursively calls the minimax algorithm and, after finding the best move, writes the tag into the tags list.
def bot_handle_move(self) -> None: best_value = -INFINITY # default best value for maximizing player (bot in this app is a maximizing player) available_moves = self.check_for_moves() # for more info check the minimax algorithm theory depth = int(1.4*self.size - self.win_condition) # (depth) decides of how deep into recursion the algorithm will best_move = None # get. 1.4 seems to be the best consensus between time of # execution and accuracy of moves for move in available_moves: self.tags[move[0]][move[1]] = 'o' move_value = self.minimax(depth, -INFINITY, INFINITY, False) self.tags[move[0]][move[1]] = None if move_value > best_value: best_value = move_value best_move = move self.tags[best_move[0]][best_move[1]] = 'o'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.alphabeta(self, sample_space, affinity,...
[ "0.55172825", "0.53830636", "0.537255", "0.53636533", "0.53437847", "0.53437847", "0.52932066", "0.5251569", "0.5240633", "0.5203933", "0.51793545", "0.5154717", "0.50856817", "0.5070792", "0.50352716", "0.50260264", "0.4998866", "0.49944788", "0.4988436", "0.4984612", "0.497...
0.62191063
0
Minimax algorithm equipped with alpha-beta pruning and wrapped into the tic-tac-toe game environment.
def minimax(self, depth: int, alpha: float, beta: float, maximizing_player: bool) -> float: if self.check_if_win('x' if maximizing_player is True else 'o'): return -10 if maximizing_player else 10 if self.full_board(): return 1 if depth == 0: return 0 available_moves = self.check_for_moves() if maximizing_player: max_eval = -INFINITY for move in available_moves: self.tags[move[0]][move[1]] = 'o' evaluation = self.minimax(depth - 1, alpha, beta, False) self.tags[move[0]][move[1]] = None max_eval = max(max_eval, evaluation) alpha = max(alpha, evaluation) if beta <= alpha: break return max_eval else: min_eval = INFINITY for move in available_moves: self.tags[move[0]][move[1]] = 'x' evaluation = self.minimax(depth - 1, alpha, beta, True) self.tags[move[0]][move[1]] = None min_eval = min(min_eval, evaluation) beta = min(beta, evaluation) if beta <= alpha: break return min_eval
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iterative_minimax_strategy(game: Any) -> Any:\n s = Stack()\n id0 = 0\n d = {0: Tree([id0, game, None])}\n s.add(0)\n\n while not s.is_empty():\n id1 = s.remove()\n item = [id1]\n if d[id1].children == []:\n for move in d[id1].value[1].current_state.get_possible_m...
[ "0.70844316", "0.7019589", "0.6996672", "0.6943626", "0.69175905", "0.6854551", "0.68362564", "0.6793055", "0.6789411", "0.6709212", "0.66958034", "0.66800773", "0.6658891", "0.6658891", "0.6573327", "0.6429762", "0.6422315", "0.6422285", "0.64213127", "0.6387401", "0.6386032...
0.0
-1
Summary This function encapsulates the routine to generate backprojected and cortical views for the magnocellular pathway retinal ganglion cells
def showNonOpponency(C,theta): GI = retina.gauss_norm_img(x, y, dcoeff[i], dloc[i], imsize=imgsize,rgb=False) # Sample using the other recepetive field, note there is no temporal response with still images S = retina.sample(img,x,y,dcoeff[i],dloc[i],rgb=True) #backproject the imagevectors ncentreV,nsurrV = rgc.nonopponency(C,S,theta) ninverse = retina.inverse(ncentreV,x,y,dcoeff[i],dloc[i], GI, imsize=imgsize,rgb=True) ninv_crop = retina.crop(ninverse,x,y,dloc[i]) ninverse2 = retina.inverse(nsurrV,x,y,dcoeff[i],dloc[i], GI, imsize=imgsize,rgb=True) ninv_crop2 = retina.crop(ninverse2,x,y,dloc[i]) # place descriptive text onto generated images cv2.putText(ninv_crop,"R+G + ",(xx,yy), font, 1,(255,255,255),2) cv2.putText(ninv_crop2,"R+G - ",(xx,yy), font, 1,(255,255,255),2) merged = np.concatenate((ninv_crop, ninv_crop2),axis=1) # create cortical maps of the imagevectors lposnon, rposnon = cortex.cort_img(ncentreV, L, L_loc, R, R_loc, cort_size, G) lnegnon, rnegnon = cortex.cort_img(nsurrV, L, L_loc, R, R_loc, cort_size, G) pos_cort_img = np.concatenate((np.rot90(lposnon),np.rot90(rposnon,k=3)),axis=1) neg_cort_img = np.concatenate((np.rot90(lnegnon),np.rot90(rnegnon,k=3)),axis=1) mergecort = np.concatenate((pos_cort_img,neg_cort_img),axis=1) return merged, mergecort
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def view_marginals_cristobal(rep=0, epoch=300, zoom=False):\n samples_path = paths.eICU_synthetic_dir + 'samples_eICU_cdgan_synthetic_dataset_r' + str(rep) + '_' + str(epoch) + '.pk'\n samples = np.load(samples_path)\n labels_path = paths.eICU_synthetic_dir + 'labels_eICU_cdgan_synthetic_dataset_r' + str(...
[ "0.57697564", "0.56456137", "0.5626997", "0.54784644", "0.5438757", "0.5425025", "0.5424559", "0.5415921", "0.5378045", "0.5369334", "0.5367731", "0.536001", "0.53513986", "0.5345884", "0.52968204", "0.52847725", "0.5271479", "0.52679116", "0.5252435", "0.5250521", "0.5245161...
0.0
-1
Summary This function encapsulates the routine to generate rectified backprojected views of all opponent retinal ganglion cells
def showBPImg(pV,nV): # object arrays of the positive and negative images inv_crop = np.empty(8, dtype=object) inv_crop2 = np.empty(8, dtype=object) for t in range(8): # backprojection functions inverse = retina.inverse(pV[:,t,:],x,y,dcoeff[i],dloc[i], GI, imsize=imgsize,rgb=True) inv_crop[t] = retina.crop(inverse,x,y,dloc[i]) inverse2 = retina.inverse(nV[:,t,:],x,y,dcoeff[i],dloc[i], GI, imsize=imgsize,rgb=True) inv_crop2[t] = retina.crop(inverse2,x,y,dloc[i]) # place descriptions cv2.putText(inv_crop[t],types[t] + " + ",(xx,yy), font, 1,(0,255,255),2) cv2.putText(inv_crop2[t],types[t] + " - ",(xx,yy), font, 1,(0,255,255),2) # stack all images into a grid posRG = np.vstack((inv_crop[:4])) negRG = np.vstack((inv_crop2[:4])) posYB = np.vstack((inv_crop[4:])) negYB = np.vstack((inv_crop2[4:])) merge = np.concatenate((posRG,negRG,posYB,negYB),axis=1) return merge
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_obs_grid(self):\n\n topX, topY, botX, botY = self.get_view_exts()\n\n grid = self.grid.slice(topX, topY, self.agent_view_size, self.agent_view_size)\n\n # for i in range(self.agent_dir + 1):\n # grid = grid.rotate_left()\n\n # Process occluders and visibility\n ...
[ "0.60112906", "0.56915206", "0.5676106", "0.56413585", "0.5641", "0.56370586", "0.5621993", "0.55501646", "0.55181795", "0.5482959", "0.541228", "0.540904", "0.5401558", "0.54013026", "0.5396132", "0.538648", "0.53705466", "0.533314", "0.530924", "0.5304909", "0.5286061", "...
0.51816034
42
Summary This function encapsulates the routine to generate rectified cortical views of all opponent retinal ganglion cells
def showCortexImg(pV,nV): # object arrays of the positive and negative images pos_cort_img = np.empty(8, dtype=object) neg_cort_img = np.empty(8, dtype=object) for t in range(8): # cortical mapping functions lpos, rpos = cortex.cort_img(pV[:,t,:], L, L_loc, R, R_loc, cort_size, G) lneg, rneg = cortex.cort_img(nV[:,t,:], L, L_loc, R, R_loc, cort_size, G) pos_cort_img[t] = np.concatenate((np.rot90(lpos),np.rot90(rpos,k=3)),axis=1) neg_cort_img[t] = np.concatenate((np.rot90(lneg),np.rot90(rneg,k=3)),axis=1) # stack all images into a grid posRGcort = np.vstack((pos_cort_img[:4])) negRGcort = np.vstack((neg_cort_img[:4])) posYBcort = np.vstack((pos_cort_img[4:])) negYBcort = np.vstack((neg_cort_img[4:])) mergecort = np.concatenate((posRGcort,negRGcort,posYBcort,negYBcort),axis=1) return mergecort
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_obs_grid(self):\n\n topX, topY, botX, botY = self.get_view_exts()\n\n grid = self.grid.slice(topX, topY, self.agent_view_size, self.agent_view_size)\n\n # for i in range(self.agent_dir + 1):\n # grid = grid.rotate_left()\n\n # Process occluders and visibility\n ...
[ "0.60586804", "0.59049714", "0.58672416", "0.58215404", "0.5733954", "0.5612951", "0.55945617", "0.55556804", "0.5546608", "0.55396324", "0.5498219", "0.5490474", "0.54275817", "0.54215384", "0.54196", "0.54184324", "0.5405503", "0.53821653", "0.53821653", "0.537462", "0.5356...
0.5189932
41
Summary Display function that generates the final output images using opencv windows
def imagetest(thetainput,doubleopponencyinput): theta = thetainput rgcMode = doubleopponencyinput C = retina.sample(img,x,y,coeff[i],loc[i],rgb=True) # CENTRE S = retina.sample(img,x,y,dcoeff[i],dloc[i],rgb=True) # SURROUND if rgcMode == 0: pV,nV = rgc.opponency(C,S,theta) else: pV,nV = rgc.doubleopponency(C,S,theta) cv2.namedWindow("Input", cv2.WINDOW_NORMAL) cv2.imshow("Input", img) rIntensity,cIntensity = showNonOpponency(C,theta) cv2.namedWindow("Intensity Responses", cv2.WINDOW_NORMAL) cv2.imshow("Intensity Responses", rIntensity) cv2.namedWindow("Intensity Responses Cortex", cv2.WINDOW_NORMAL) cv2.imshow("Intensity Responses Cortex", cIntensity) cv2.waitKey(0) #Generate backprojected images if showInverse: rOpponent = showBPImg(pV,nV) cv2.namedWindow("Backprojected Opponent Cells Output", cv2.WINDOW_NORMAL) cv2.imshow("Backprojected Opponent Cells Output", rOpponent) cv2.waitKey(0) # Cortex if showCortex: cOpponent = showCortexImg(pV,nV) cv2.namedWindow("Cortex Opponent Cells Output", cv2.WINDOW_NORMAL) cv2.imshow("Cortex Opponent Cells Output", cOpponent) cv2.waitKey(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show(self, name='Detections'):\n cv2.imshow(name, self.get_image())\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def show(self) -> None:\n cv.imshow(str(self.__class__), self.output_image)", "def display_img(title,img):\r\n cv2.namedWindow('img', cv2.WINDOW_NORMAL)\r\n cv...
[ "0.71970654", "0.7002493", "0.6995437", "0.6975558", "0.69294494", "0.69176483", "0.69103956", "0.68500286", "0.6783941", "0.6769802", "0.67669165", "0.6740594", "0.6729734", "0.6711798", "0.66964775", "0.6684703", "0.66664636", "0.66619647", "0.66425073", "0.6603232", "0.660...
0.0
-1
Summary Display function that generates the final output images using MatplotLib windows
def imagetestplt(thetainput,doubleopponencyinput): theta = thetainput rgcMode = doubleopponencyinput C = retina.sample(img,x,y,coeff[i],loc[i],rgb=True) # CENTRE(sharp retina) S = retina.sample(img,x,y,dcoeff[i],dloc[i],rgb=True) # SURROUND(blurred retina) if rgcMode == 0: pV,nV = rgc.opponency(C,S,theta) else: pV,nV = rgc.doubleopponency(C,S,theta) rIntensity,cIntensity = showNonOpponency(C,theta) # Construct window plots plt.subplot(3,1,1), plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)), plt.title('Original test image') plt.xticks([]), plt.yticks([]) plt.subplot(3,1,2), plt.imshow(cv2.cvtColor(rIntensity, cv2.COLOR_BGR2RGB)), plt.title('Backprojected R+G Intensity Response') plt.xticks([]), plt.yticks([]) plt.subplot(3,1,3), plt.imshow(cv2.cvtColor(cIntensity, cv2.COLOR_BGR2RGB)), plt.title('Cortical R+G Intensity Response') plt.xticks([]), plt.yticks([]) # format float to string thetastring = "%.2f" % theta plt.suptitle('Rectified DoG Intensity Images. Threshold:' + thetastring, fontsize=16) plt.show() #Generate backprojected images if showInverse: rOpponent = showBPImg(pV,nV) plt.imshow(cv2.cvtColor(rOpponent, cv2.COLOR_BGR2RGB)), plt.title('Backprojected Opponent Cells Output') plt.xticks([]), plt.yticks([]) plt.show() # Cortex if showCortex: cOpponent = showCortexImg(pV,nV) plt.imshow(cv2.cvtColor(cOpponent, cv2.COLOR_BGR2RGB)), plt.title('Cortex Opponent Cells Output') plt.xticks([]), plt.yticks([]) plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()\n...
[ "0.6950144", "0.6822228", "0.6799319", "0.6795833", "0.6745523", "0.6712222", "0.66112965", "0.6607922", "0.65862733", "0.6581138", "0.65078765", "0.6487054", "0.64790344", "0.646684", "0.64635116", "0.64561826", "0.64561826", "0.64561826", "0.6436497", "0.64337176", "0.64246...
0.0
-1