query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Changes the priority of the element elem to prio.
def change_priority(self, elem, prio): pos = self.pos[elem] currPrio = self.A[pos][1] self.A[pos] = (elem, prio) if self.cmpFn(prio, currPrio): self.insert_loop(pos, pos // 2) # Up heapify else: self.combine(pos) # Down heapify
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setPriority(self, p):\n self.priority = p", "def _update_priority(self, task, prio, worker):\n task.priority = prio = max(prio, task.priority)\n for dep in task.deps or []:\n t = self._state.get_task(dep)\n if t is not None and prio > t.priority:\n self._...
[ "0.67719734", "0.6752105", "0.6318923", "0.6283811", "0.6121634", "0.61172235", "0.6071931", "0.6042161", "0.5969836", "0.5969836", "0.5969836", "0.5935215", "0.5908808", "0.59068906", "0.5899444", "0.5884699", "0.5880411", "0.58484524", "0.58209383", "0.5818788", "0.5729039"...
0.81994
0
Gets the priority of an element.
def get_priority(self, elem): pos = self.pos[elem] return self.A[pos][1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getpriority(self, name):\n\t\tif name not in self:\n\t\t\treturn None\n\t\treturn self.attributes[name].priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(self):\n return self.__priority", "def _get_priority(s...
[ "0.7882315", "0.7649342", "0.7649342", "0.7649342", "0.7649342", "0.7647876", "0.7647876", "0.7504323", "0.74972016", "0.7453977", "0.74093306", "0.74017596", "0.73951834", "0.73866266", "0.7357312", "0.7357312", "0.7357312", "0.72419477", "0.72419477", "0.72419477", "0.72419...
0.8585831
0
Gets the minimum element of the heap.
def min(self): return self.get_first()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findmin(self):\n return self.heap[0] if len(self.heap) > 0 else None", "def min(self):\r\n if self._size == 0: raise KeyError # Nothing to return if heap empty\r\n return self._data[0] # so simple!\r", "def get_min(self) -> object:\n if not self.is_empty():\n ...
[ "0.86202234", "0.8542973", "0.8532992", "0.85131574", "0.8460315", "0.84360385", "0.8252153", "0.81792325", "0.8092698", "0.8078686", "0.80421853", "0.79581094", "0.7663294", "0.76157564", "0.75814164", "0.7493778", "0.74732834", "0.7461151", "0.7447867", "0.7397468", "0.7345...
0.7234268
26
Gets the minimum element of the heap and removes it.
def take_min(self): return self.get_first()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_min(self):\r\n # Should raise an exception of size is 0...\r\n if self._size == 0: raise KeyError # Can't remove from an empty heap\r\n result = self._data[0] # remember the smallest\r\n self._data[0] = None # None is so we don't have a reference.\r\n ...
[ "0.8762053", "0.8528082", "0.85083675", "0.8469478", "0.83009934", "0.82481664", "0.8209636", "0.81854427", "0.8066061", "0.80339926", "0.80249083", "0.7980516", "0.788914", "0.7866211", "0.7851768", "0.78122234", "0.7769824", "0.77424836", "0.76917297", "0.7681458", "0.76671...
0.0
-1
Gets the maximum element of the heap.
def max(self): return self.get_first()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def heap_extract_max(self, A):\n max = A[0]\n A[0] = A[len(A)-1]\n del A[-1] #use del so it stays out of the function\n self.max_heapify(A, 0)\n return max", "def heap_pop_max(heap):\n last = heap.pop()\n if heap:\n return_item = heap[0]\n heap[0] = last\n ...
[ "0.7984895", "0.79142934", "0.77526826", "0.75617915", "0.75280285", "0.7499288", "0.74823093", "0.74128735", "0.7341171", "0.73030984", "0.7244219", "0.7195734", "0.71896243", "0.7186615", "0.71417284", "0.71403116", "0.71197176", "0.71066064", "0.7081008", "0.70789945", "0....
0.73672485
8
Gets the maximum element of the heap and removes it.
def take_max(self): return self.delete_first()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_max(self):\n retval = self.heap_list[1]\n self.heap_list[1] = self.heap_list[self.size]\n self.size = self.size - 1\n pop_val = self.heap_list.pop()\n self.percolate_down(1)\n return retval", "def remove(self):\n max_item = self.heaplist[1]\n sel...
[ "0.8782797", "0.8520369", "0.834615", "0.80668986", "0.79649794", "0.77679366", "0.77248996", "0.76835424", "0.7677773", "0.7659807", "0.76241404", "0.7556659", "0.75099057", "0.75092083", "0.74387366", "0.74314845", "0.73972696", "0.7393631", "0.7384405", "0.7384216", "0.737...
0.6685935
61
Transcodes a file src to a file dest.
def transcode(self, src: Path, dest: Path) -> None: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copyFile( src, dest ):\n\tinFile = open( src, 'r' )\n\toutFile = open( dest, 'w' )\n\tfor line in inFile:\n\t\toutFile.write( line )\n\toutFile.close()\n\tinFile.close()", "def compressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.compress(data, 1)\n except zlib.erro...
[ "0.6751052", "0.6534881", "0.61683327", "0.6163265", "0.61372477", "0.6119148", "0.60524434", "0.602882", "0.598594", "0.5982905", "0.5955934", "0.59134305", "0.5866599", "0.585062", "0.58318645", "0.5749547", "0.5694051", "0.56439036", "0.5634352", "0.5634352", "0.5634352", ...
0.7879794
0
Takes an integer below 1001 and converts it into english text. Ignore spaces and hyphens as the instructions require.
def int2text(integer): # Numbers 1-99 are handled by simply looking up words in the special_case # dictionary. if integer < 100: return digit2text(integer) elif integer < 1000: # If exactly some hundred, then just return the word for the hundred's # place and the word 'hundred' if integer%100 == 0: return digit2text(integer/100)+'hundred' # Otherwise return the word for the hundred's place, the word # 'hundredand' and do some composition to make the rest of the words. else: return digit2text(integer/100)+'hundredand'+\ digit2text(integer%100) # Special case for 1000. elif integer == 1000: return "onethousand"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def english(number):\r\n if number == 0:\r\n return 'zero'\r\n word = ''\r\n for step in itertools.count():\r\n number, rest = divmod(number, 1000)\r\n word = format_num(en3(rest), step) + word\r\n if number == 0:\r\n return word.strip()", "def number2text(integer)...
[ "0.7711457", "0.736089", "0.6570956", "0.65348077", "0.6504594", "0.6472066", "0.6465786", "0.6318002", "0.63022107", "0.6287715", "0.6235496", "0.6192539", "0.61917514", "0.6169387", "0.6149354", "0.6142758", "0.61054987", "0.60986495", "0.60660845", "0.60206014", "0.5872346...
0.7366785
1
Takes integer digits/double digits and returns the english text for these numbers.
def digit2text(integer): # If the integer is in the special cases dictionary, then look up the word, # return it, and we're done. if integer in special_case_dict.keys(): return special_case_dict[integer] # Otherwise compose the word, by taking the number in the ten's place and # multiplying by 10 (i.e. integer/10*10 evaluates to a number in the set # {10, 20, 30, 40, 50, 60, 70, 80, 90} for any input integer between 10-99. # Then add word for the number in the one's place else: return special_case_dict[integer/10*10]+special_case_dict[integer%10]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def english(number):\r\n if number == 0:\r\n return 'zero'\r\n word = ''\r\n for step in itertools.count():\r\n number, rest = divmod(number, 1000)\r\n word = format_num(en3(rest), step) + word\r\n if number == 0:\r\n return word.strip()", "def int2text(integer):\n...
[ "0.7694077", "0.7104256", "0.7084235", "0.6613821", "0.64341813", "0.6333191", "0.6266428", "0.62469083", "0.6115907", "0.6083413", "0.6064189", "0.6041851", "0.5971319", "0.59671617", "0.59606844", "0.58907706", "0.5845223", "0.58414835", "0.5818904", "0.5810572", "0.5807192...
0.7075966
3
Solves [a]{b} = {x} by Gauss elimination.
def gaussElimin(a,b): a=float64(a) b=float64(b) n=len(b) x=zeros((n,1),dtype=float) for k in range(n-1): for i in range(k+1,n): l=float(a[i][k])/a[k][k] a[i][k]=0 for j in range(k+1,n): a[i][j]=a[i][j]-l*a[k][j] b[i]=b[i]-l*b[k] x[n-1]=float(b[n-1])/a[n-1][n-1] for i in range(n-2,-1,-1): sum=b[i] for j in range(i+1,n): sum=sum-a[i][j]*x[j] x[i]=float(sum)/a[i][i] return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian_elimination(A, b):\n \n m, n = A.shape\n U = A.copy() \n b = b.copy()\n\n # forward sweep, reduce A to a upper triangular matrix\n for k in range(min(m, n)):\n swap = np.argmax(np.abs(U[k:, k])) + k\n if U[swap, k] == 0:\n raise ValueError('Singular matrix')\...
[ "0.7353518", "0.71408343", "0.71251684", "0.70046204", "0.6569199", "0.64925224", "0.6439031", "0.63340366", "0.62848866", "0.6225892", "0.6141184", "0.61222094", "0.6090073", "0.6016392", "0.5969952", "0.5954091", "0.59377366", "0.5927988", "0.59233207", "0.59059817", "0.583...
0.7767935
0
Solves [L][U]{x} = b, where [a] = [L\U] is the matrix returned from LUdecomp.
def LUsolve(a,b): b=float64(b) n=len(b) LU=LUdecomp(a) y=zeros((n,1)) x=zeros((n,1)) y[0]=b[0] for i in range(1,n): sum=b[i] for j in range(i): sum=sum-LU[i][j]*y[j] y[i]=sum x[n-1]=float(y[n-1])/LU[n-1][n-1] for i in range(n-2,-1,-1): sum=y[i] for j in range(i+1,n): sum=sum-LU[i][j]*x[j] x[i]=float(sum)/LU[i][i] return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LU_solve(A, d, b):\n \n\n L, U = L1U(A, d)\n\n y = rforwardsolve(L, b, d)\n x = rbackwardsolve(U, y, d)\n\n return x", "def lu_decom(A,b):\n # init\n n = len(b)\n L = np.eye(n)\n U = np.zeros((n,n))\n x = np.zeros(n)\n y = np.zeros(n)\n\n # decomposition A = LU\n\n U[0,...
[ "0.8204323", "0.77781606", "0.7435162", "0.7326853", "0.71451354", "0.7074888", "0.6962283", "0.69174", "0.6882864", "0.6877735", "0.68558407", "0.6839501", "0.67079276", "0.6632435", "0.6581103", "0.6516492", "0.6455007", "0.63229835", "0.62904537", "0.62402534", "0.6206865"...
0.79200137
1
Starting offset of the segment
def start(self): return self.start_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_offset(self):\n return self.get_info_value(\"D_STARTOFFS\")", "def offset_from_start(self, part):\n index = self.parts.index(part)\n return sum([p.length for p in self.parts[:index]])", "def min_offset(self):\n return self.offset", "def offset_segment(self, i):\n ...
[ "0.75695366", "0.7171375", "0.7162001", "0.71174693", "0.7113862", "0.7081324", "0.7017677", "0.7017677", "0.7017677", "0.69003886", "0.679636", "0.67469805", "0.66896033", "0.6640042", "0.66054326", "0.6576339", "0.6576339", "0.6576339", "0.6549182", "0.6549182", "0.64778554...
0.0
-1
End offset of the segment
def end(self): return self.end_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEnd(self):\n return _libsbml.LineSegment_getEnd(self)", "def end(self):\n return self.start + self.size", "def endaddr(self):\n return self.startaddr + self.size", "def _get_end(self):\n return self._end", "def get_end(self):\n return self.__end", "def getEnd(self) -> lo...
[ "0.73068535", "0.72605735", "0.72010446", "0.699925", "0.6922253", "0.6915124", "0.68964475", "0.6843549", "0.68276006", "0.6826976", "0.67586005", "0.6580891", "0.6480106", "0.64694715", "0.64635617", "0.6447487", "0.64299506", "0.6396373", "0.63472545", "0.63421696", "0.634...
0.6141078
31
Name of the tag
def name(self): return self.name_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tag(self) -> str:\n return self.name or ''", "def get_tag_name(self):\n\n pass", "def tag_name(self) -> str:\n return pulumi.get(self, \"tag_name\")", "def get_name(self):\n return self.tagnode", "def tag_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"ta...
[ "0.8629432", "0.856235", "0.84365237", "0.80975956", "0.8038163", "0.7732518", "0.764949", "0.76234204", "0.7597434", "0.7510624", "0.74376386", "0.74048376", "0.7393784", "0.73702985", "0.73563516", "0.7353813", "0.7353813", "0.7353813", "0.7353813", "0.7344953", "0.73428065...
0.0
-1
Value of the tag
def tag(self): return self.tag_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tag_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tag_value\")", "def get(self):\n return self.tag.get()", "def value(self):\n return self.raw.get_attribute(\"value\")", "def value(self):\n return self.element.get_attribute('value')", "def value(self):\...
[ "0.7868648", "0.764538", "0.76248944", "0.7556161", "0.7556161", "0.7556161", "0.747307", "0.74238324", "0.74004674", "0.7335146", "0.730605", "0.7223917", "0.72173995", "0.7187691", "0.71812", "0.7105039", "0.70779276", "0.7067446", "0.70370305", "0.70255345", "0.70255345", ...
0.6916473
41
Computes the Jacobian of y wrt x assuming minibatchmode.
def _get_minibatch_jacobian(y, x, create_graph=False): assert y.shape[0] == x.shape[0] y = y.view(y.shape[0], -1) # Compute Jacobian row by row. jac = [] for j in range(y.shape[1]): dy_j_dx = torch.autograd.grad(y[:, j], x, torch.ones_like(y[:, j]), retain_graph=True, create_graph=True)[0].view(x.shape[0], -1) jac.append(torch.unsqueeze(dy_j_dx, 1)) jac = torch.cat(jac, 1) return jac
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_minibatch_jacobian(y, x):\n assert y.shape[0] == x.shape[0]\n y = y.view(y.shape[0], -1)\n\n # Compute Jacobian row by row.\n jac = []\n for j in range(y.shape[1]):\n dy_j_dx = torch.autograd.grad(y[:, j], x, torch.ones_like(y[:, j]), retain_graph=True,\n ...
[ "0.8049506", "0.7586799", "0.7340082", "0.7092742", "0.70245093", "0.6984471", "0.6948992", "0.68618804", "0.6859211", "0.6838144", "0.6793028", "0.67266047", "0.6696106", "0.66701853", "0.6601887", "0.6596935", "0.65616024", "0.65616024", "0.6518664", "0.6505477", "0.6503522...
0.7515247
2
Return a Expression_obj whose name is gene_id
def __init__(self): # self.organism = [] self.weighting_dict = defaultdict(list) # self.codon_obj_dict = {} self.codon_dict = { 'UUU':'F','UUC':'F', 'UUA':'L','UUG':'L','CUU':'L','CUC':'L','CUA':'L','CUG':'L', 'AUU':'I','AUC':'I','AUA':'I', 'AUG':'M', 'GUU':'V', 'GUC':'V','GUA':'V','GUG':'V', 'UCU':'S','UCC':'S','UCA':'S','UCG':'S', 'CCU':'P','CCC':'P','CCA':'P','CCG':'P', 'ACU':'T','ACC':'T','ACA':'T','ACG':'T', 'GCU':'A','GCC':'A','GCA':'A','GCG':'A', 'UAU':'Y','UAC':'Y', 'UAA':'X','UAG':'X', 'CAU':'H','CAC':'H', 'CAA':'Q','CAG':'Q', 'AAU':'N','AAC':'N', 'AAA':'K','AAG':'K', 'GAU':'D','GAC':'D', 'GAA':'E','GAG':'E', 'UGU':'C','UGC':'C', 'UGA':'X', 'UGG':'W', 'CGU':'R','CGC':'R','CGA':'R','CGG':'R', 'AGU':'S','AGC':'S', 'AGA':'R','AGG':'R', 'GGU':'G','GGC':'G', 'GGA':'G','GGG':'G' }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_gene(self, gene_id, source=\"eid\"):\n\n gene_id = str(gene_id)\n\n \n try:\n valid_gene = self.gene_cache[self.source_cache[source][gene_id]]\n return valid_gene\n except KeyError:\n pass\n \n valid_eid = None\n\n if source ...
[ "0.6313475", "0.63014066", "0.6045709", "0.5961227", "0.5909269", "0.5827519", "0.5769331", "0.5765576", "0.57507443", "0.57040155", "0.56940395", "0.5685017", "0.5614033", "0.55961704", "0.55855423", "0.5572768", "0.5556484", "0.5548169", "0.5545586", "0.5535128", "0.5527104...
0.0
-1
Constructs a Octave ResNet26 model.
def pre_act_oct_resnet26(pretrained=False, **kwargs): model = PreActOctResNet(Bottleneck, [2, 2, 2, 2], **kwargs) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resnet10(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [1, 1, 1, 1], shortcut_type, num_classes, in_channels)\n return model", "def resnet34(shortcut_type, num_classes, in_channels):\n model = ResNet(BasicBlock, [3, 4, 6, 3], shortcut_type, num_classes, in_channels)\n ret...
[ "0.65736985", "0.6366523", "0.6366468", "0.6340577", "0.6290355", "0.62448686", "0.6211965", "0.62079406", "0.6202202", "0.61859906", "0.617765", "0.6167639", "0.6150246", "0.6131579", "0.61293316", "0.6126124", "0.60872185", "0.60808474", "0.60243255", "0.6022623", "0.601866...
0.64773536
1
Constructs a Octave ResNet50 model.
def pre_act_oct_resnet50(pretrained=False, **kwargs): model = PreActOctResNet(Bottleneck, [3, 4, 6, 3], **kwargs) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n fc = transfer_fc(model.fc)\n model.fc = fc\n return model", "def ResNet50_model(input_shape, pooling):\n f...
[ "0.6788361", "0.67803186", "0.674441", "0.6740062", "0.6740062", "0.6740062", "0.6740062", "0.67154825", "0.67154825", "0.6712815", "0.6572177", "0.6566482", "0.6549519", "0.6523207", "0.64100075", "0.6408448", "0.6368369", "0.6368369", "0.6367889", "0.63454634", "0.6303097",...
0.66204983
10
Constructs a Octave ResNet101 model.
def pre_act_oct_resnet101(pretrained=False, **kwargs): model = PreActOctResNet(Bottleneck, [3, 4, 23, 3], **kwargs) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resnet101(pretrained=False, **kwargs):\n model = ResNet('resnet101', Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 2...
[ "0.6975478", "0.6963279", "0.6944051", "0.6944051", "0.6944051", "0.6944051", "0.69128805", "0.68887615", "0.6869963", "0.6868124", "0.68124294", "0.6709491", "0.6664259", "0.6648746", "0.66283625", "0.66283625", "0.65992206", "0.65794027", "0.6534455", "0.6504741", "0.646747...
0.6689366
12
Constructs a Octave ResNet152 model.
def pre_act_oct_resnet152(pretrained=False, **kwargs): model = PreActOctResNet(Bottleneck, [3, 8, 36, 3], **kwargs) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_model(self):\r\n\r\n self.model = ResNet152V2(weights='imagenet')", "def resnet152(pretrained=False, **kwargs):\n model = ResNet('resnet152', Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model"...
[ "0.71429336", "0.71426964", "0.70883363", "0.7048324", "0.70366234", "0.70190185", "0.6981999", "0.6981999", "0.6981999", "0.6981999", "0.6969816", "0.6943194", "0.6817958", "0.67309207", "0.6713831", "0.6711944", "0.6711944", "0.6689598", "0.6594432", "0.65649575", "0.624270...
0.6930058
12
Constructs a Octave ResNet200 model.
def pre_act_oct_resnet200(pretrained=False, **kwargs): model = PreActOctResNet(Bottleneck, [3, 24, 36, 3], **kwargs) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "def resnet200(**kwargs):\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "def...
[ "0.6754732", "0.6618465", "0.65908283", "0.63108593", "0.63079596", "0.62873125", "0.62037766", "0.61919415", "0.61760974", "0.61187875", "0.6101522", "0.60922974", "0.6071278", "0.6071278", "0.6071278", "0.6071278", "0.6071278", "0.60647464", "0.6059056", "0.6056413", "0.602...
0.66531056
1
The standard size of a tile sprite in 2D screen space.
def tile_size_2d(self): return 32.0, 32.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cellsize_2d(self):\t\r\n return self.dx * self.dy", "def pixelSize(self):\n br = self.sceneBoundingRect()\n if self.image is None:\n return 1,1\n return br.width()/self.width(), br.height()/self.height()", "def get_pixel_size(self):\n p0 = core.PointF(0, 0)\n ...
[ "0.677379", "0.6732188", "0.6701107", "0.6691744", "0.6675488", "0.66745335", "0.666135", "0.6641239", "0.6588583", "0.6561213", "0.6488441", "0.64793986", "0.6465913", "0.64484566", "0.6422894", "0.6421343", "0.6418597", "0.6404673", "0.639991", "0.6364815", "0.63445616", ...
0.79170084
0
Sets the cell at the given position to the tile given via the tile index.
def set_cell(self, x, y, tile_index): data_index = x + y * self._size[0] # type: int # self._data[data_index] = tile_index # # if self._sprites[data_index]: # self._sprites[data_index].delete() # self._sprites[data_index] = None # Release resources if self._tiles[data_index]: self._tiles[data_index].delete() self._tiles[data_index] = None # Only create sprite when not zero if tile_index: tile_prototype = self._tile_set.get(tile_index, None) # type: Optional[Tile] if not tile_prototype: raise TileSetError("tile set does not contain tile for index %s" % tile_index) tile_w, tile_h = self._tile_size_2d i, j, _k = cart_to_iso(x, y, 0) ax, ay = tile_prototype.anchor tile_x, tile_y = i * tile_w - ax, j * tile_h - ay tile = deepcopy(tile_prototype) tile.sprite = pyglet.sprite.Sprite(tile.image, tile_x, tile_y) tile.aabb3d.pos = float(x), float(y), 0.0 tile.aabb2d.pos = tile_x, tile_y self._tiles[data_index] = tile # self._sprites[data_index] = pyglet.sprite.Sprite(tile.image, tile_x, tile_y) # Currently only supports a single level, so everything is on z-level 0 # self._aabb3d[data_index] = AABB3D(float(x), float(y), 0.0, tile.size[0], tile.size[1], tile.size[2]) # self._aabb2d[data_index] = AABB2D(tile_x, tile_y, tile_w, tile_h)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._cells[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._cells[row][col] = value", "def set_tile(self, row, col, value):\n self._cells[row][col] = value", "...
[ "0.79476476", "0.793965", "0.792472", "0.7922317", "0.7811975", "0.77657914", "0.77657914", "0.77415025", "0.77413976", "0.77362347", "0.77177304", "0.7665559", "0.76413506", "0.76413506", "0.7600459", "0.7590378", "0.7555909", "0.7555414", "0.7547121", "0.74911577", "0.73877...
0.77405584
9
Adds an object, to be managed and drawn by the tile map.
def add_object(self, obj): self._objects.append(obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addObject(self,object):\n object.screen = self.screen\n object.parent = self\n self.addList.append(object)", "def add_object(self, obj):\n\t\tself.objects.append(obj)", "def add(self, obj):\n if isinstance(obj, Drawable):\n self._drawables.add(...
[ "0.77441376", "0.7656286", "0.75963026", "0.7574792", "0.75481164", "0.7518914", "0.74521136", "0.737135", "0.7309146", "0.7289169", "0.71311355", "0.70795727", "0.69718575", "0.6938054", "0.68440473", "0.68283266", "0.681699", "0.68109393", "0.67909193", "0.6732807", "0.6706...
0.7219038
10
Sorts the drawables in an order that they can be rendered using the painter's algorithm (back to front).
def sort(self, key_func): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self, **kwargs):\n for o in sorted(self._drawables, key=default_itemgetter(\"z\", default=0)):\n o.draw(**kwargs)", "def start_sorting(self):\n if self.sorting:\n return None\n self.sorting = True\n\n passes = 0\n while self.sorting:\n ...
[ "0.6645382", "0.6610428", "0.6208446", "0.6029311", "0.60101074", "0.60021496", "0.59987986", "0.5927565", "0.5859521", "0.58288765", "0.5824865", "0.5824865", "0.58011025", "0.57603604", "0.57544345", "0.5688162", "0.5662971", "0.5642846", "0.5616782", "0.55934376", "0.55926...
0.0
-1
Save the response string.
def writeresponse(self, rspstr): self.response += rspstr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_response(self, res) -> None:\n file = open(\"response_{}.json\".format(self.num_res), \"w\")\n file.write(str(res))\n file.close()", "def save_response(self, request, response):\n response_dict = self.process_response(request.path, response)\n try:\n self.se...
[ "0.7586363", "0.7441666", "0.67181605", "0.6693726", "0.66190016", "0.6497438", "0.64742", "0.6389973", "0.6360549", "0.62210476", "0.6212932", "0.6212932", "0.6147369", "0.6139059", "0.6138615", "0.6062147", "0.60561544", "0.6024466", "0.602372", "0.59702677", "0.5941074", ...
0.6328722
9
Using the Command_Handler from command module to handle command.
def usingHandler(self, cmd): self.command_handler.handle_command(cmd) while msg_queue.empty() is False: self.writeresponse(msg_queue.get())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _command(self, *cmd, handler=None):", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n ...
[ "0.8338623", "0.75426656", "0.7504038", "0.7325603", "0.7277105", "0.726302", "0.71191317", "0.7092065", "0.70484436", "0.6998338", "0.6996647", "0.6957424", "0.6936447", "0.6885723", "0.683087", "0.6806178", "0.676416", "0.6748807", "0.6729248", "0.6727872", "0.67130303", ...
0.77345246
1
Attach ipmiconsole to target instance specified by its name
def start(instance="default"): # initialize logging global logger_ic logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance) common.init_logger(instance) # initialize environment env.local_env.quit_flag = False common.init_env(instance) pid_file = "{}/{}/.ipmi_console.pid".format(config.infrasim_home, instance) daemon.daemonize(pid_file) with open(pid_file, "r") as fp: logger_ic.info("ipmi-console of {} start with pid {}". format(instance, fp.read().strip())) # parse the sdrs and build all sensors sdr.parse_sdrs() # running thread for each threshold based sensor _start_monitor(instance) _spawn_sensor_thread() _start_console(instance)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def launch_new_instance():\n import IPython\n\n IPython.Shell.start().mainloop()", "def attach(self):\r\n sshpass = \"sshpass -p \\\"akanksha1\\\"\"\r\n remote_Station = \"root@192.168.54.24\"\r\n base = \"ssh -t \" + options[\"username\"] + \"@\" + options[\"server\"]\r\n\r\n s...
[ "0.555184", "0.53565377", "0.5350084", "0.52938074", "0.527807", "0.51141536", "0.503388", "0.5028491", "0.5026515", "0.50112593", "0.50111884", "0.49634692", "0.49329525", "0.4874104", "0.48671383", "0.4858034", "0.48359525", "0.48175284", "0.48108807", "0.47951758", "0.4767...
0.5010261
11
Target method used by monitor thread, which polls vbmc status every 3s. If vbmc stops, ipmiconsole will stop.
def monitor(instance="default"): global logger_ic while True: try: with open("{}/{}/.{}-bmc.pid".format( config.infrasim_home, instance, instance), "r") as f: pid = f.readline().strip() if not os.path.exists("/proc/{}".format(pid)): logger_ic.warning("Node {} vBMC {} is not running, " "ipmi-console is ready to quit". format(instance, pid)) break time.sleep(3) except IOError: logger_ic.warning("Node {} workspace is possibly destroyed, " "ipmi-console is ready to quit".format(instance)) break stop(instance)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitor(self, rms):\n pass", "def monitor(self):\n while not self.terminated:\n try:\n if (time.time() - self.updated_time) < 5:\n messages = self.messages.copy()\n # procs = np.min([ len(messages), 9 ]) + 1\n ...
[ "0.6409184", "0.627425", "0.62064976", "0.61520576", "0.61488384", "0.608878", "0.6028219", "0.60113215", "0.58789355", "0.586533", "0.5813013", "0.5793242", "0.57749766", "0.5774655", "0.5764802", "0.57504106", "0.57363623", "0.5703783", "0.57020646", "0.56902844", "0.568184...
0.65879107
0
Create a monitor thread to watch vbmc status.
def _start_monitor(instance="default"): global logger_ic logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance) logger_ic.info("ipmi-console monitor thread starts to run.") monitor_thread = threading.Thread(target=monitor, args=(instance,)) monitor_thread.setDaemon(True) monitor_thread.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _StartStatusUpdateThread(self):\n self._status_update_active = True\n self._status_update_thread = threading.Thread(\n name='Status update', target=self._StatusUpdateThreadMain)\n self._status_update_thread.start()", "def run(self):\n self.monitor.start()", "def run():\n logger.ve...
[ "0.63383204", "0.6285425", "0.6240025", "0.5985517", "0.59852713", "0.59686154", "0.5919293", "0.59185845", "0.5913581", "0.5894443", "0.58611625", "0.58052415", "0.579297", "0.5792904", "0.5758416", "0.5757731", "0.57059795", "0.57036275", "0.5703507", "0.56903523", "0.56861...
0.62090486
3
Stop ipmiconsole of target instance specified by its name
def stop(instance="default"): global logger_ic logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance) try: file_ipmi_console_pid = "{}/{}/.ipmi_console.pid".\ format(config.infrasim_home, instance) with open(file_ipmi_console_pid, "r") as f: pid = f.readline().strip() os.kill(int(pid), signal.SIGTERM) logger_ic.info("SIGTERM is sent to pid: {}".format(pid)) os.remove(file_ipmi_console_pid) except IOError: # When pid file is missing, by e.g., node destroy, # find process id by instance name if instance == "default": process_name = "ipmi-console start$" else: process_name = "ipmi-console start {}".format(instance) ps_cmd = r"ps ax | grep '{}' | grep Sl | awk '{{print $1}}' | head -n1".format(process_name) logger_ic.warning("Fail to find ipmi console pid file, check by:") logger_ic.warning("> {}".format(ps_cmd)) _, pid = run_command(cmd=ps_cmd) logger_ic.warning("ipmi console pid got: {}".format(pid)) if not pid: logger_ic.warning("ipmi console for instance {} is not running".format(instance)) return os.kill(int(pid), signal.SIGTERM) logger_ic.info("SIGTERM is sent to pid: {}".format(pid)) except Exception: logger_ic.warning(traceback.format_exc()) pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop(self):\n self.scion_sh('stop')", "def processStop(name):\n imrclient.update_server_info()\n imrclient.process_stop(name)", "def stop_notebook_instance(NotebookInstanceName=None):\n pass", "def stop(self):\n if self.debug:\n print(\"%s stop\" % self.name)\n s...
[ "0.69108975", "0.6825068", "0.6671944", "0.6663248", "0.66268575", "0.65905327", "0.65741503", "0.6553872", "0.6435648", "0.6377335", "0.6354143", "0.63085943", "0.6253596", "0.6247487", "0.62375504", "0.62224996", "0.62192184", "0.61786765", "0.6176067", "0.6176067", "0.6176...
0.6886424
1
Return an upload ID.
def startupload(request): if "parent" in request.POST: parent = models.Character.objects.get(pk=request.POST["parent"]) else: parent = None c = models.Character(parent=parent) c.save() d = json.loads(request.POST['data']) for k, v in d["texts"].items(): set_text(c, k, v) for k, v in d["sounds"].items(): set_sound(c, k, v) return JsonResponse({"id": c.pk})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initiate_multipart_upload(self):\n request = self.s3.create_request(\"OBJECT_POST\", uri = self.uri, headers = self.headers_baseline, extra = \"?uploads\")\n response = self.s3.send_request(request)\n data = response[\"data\"]\n self.upload_id = getTextFromXml(data, \"UploadId\")\n ...
[ "0.6757901", "0.67014873", "0.6528737", "0.6503102", "0.644656", "0.6368345", "0.6355548", "0.62546957", "0.6124786", "0.6089393", "0.6034663", "0.6034663", "0.5999296", "0.5985921", "0.59466124", "0.59466124", "0.5939695", "0.5939695", "0.5924135", "0.5912577", "0.5890405", ...
0.0
-1
Creates a handle to the Ceph Cluster.
def connect(ceph_config_file, timeout = CEPH_TIMEOUT): handle = rados.Rados(conffile = ceph_config_file) LOGGER.info("librados version: " + str(handle.version())) LOGGER.info("Attempting to connect to: " + str(handle.conf_get('mon initial members'))) handle.connect() #timeout shoudl be specified LOGGER.info("Cluster ID" + handle.get_fsid()) return handle
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ceph_info(handle, ceph_config, timeout):\n cluster = dict()\n\n cluster['status'] = ceph_mon_command(handle,\n 'status', timeout)\n cluster['version'] = shell_command('ceph -v') + b'\\n'\n\n # ceph versions command was introduced in mimic\n version = c...
[ "0.6605776", "0.6418442", "0.6327758", "0.61983645", "0.61876357", "0.61836255", "0.6062055", "0.6059325", "0.60109323", "0.5957398", "0.5952814", "0.58882135", "0.58059293", "0.57884413", "0.5772146", "0.5749063", "0.57396287", "0.5687946", "0.5676263", "0.56761366", "0.5651...
0.6422216
1
execute a shell command in the cluster
def shell_command(command, shell=True): p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=shell) result = p.communicate()[0] if result == "command not known": LOGGER.info("command not known " + err) return result.strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute(cmd) :\n return os.system( cmd )", "def do_shell(self, command):\n os.system(command)", "def raw_cluster_cmd(self, *args, **kwargs):\n proc = self.controller.run(args=[os.path.join(BIN_PREFIX, \"ceph\")] + \\\n list(args), **kwargs)\n r...
[ "0.682894", "0.6679226", "0.6656794", "0.66437536", "0.6632637", "0.6577042", "0.65581995", "0.65328985", "0.65173554", "0.65003866", "0.64953446", "0.6490907", "0.64615303", "0.64323294", "0.6411985", "0.63707674", "0.6368736", "0.63686305", "0.6345518", "0.631454", "0.63143...
0.0
-1
Gather overall cluster information
def get_ceph_info(handle, ceph_config, timeout): cluster = dict() cluster['status'] = ceph_mon_command(handle, 'status', timeout) cluster['version'] = shell_command('ceph -v') + b'\n' # ceph versions command was introduced in mimic version = cluster['version'] version = str(version.decode('utf-8')).split(' ')[2].split(".")[0] if int(version) >= 13: cluster['versions'] = shell_command('ceph versions') + b'\n' fsid = handle.get_fsid() + '\n' cluster['fsid'] = str.encode(fsid) with open(ceph_config, 'r') as f: ceph_conf = f.read() cephconf = str(ceph_conf) cluster['ceph_conf'] = str.encode(cephconf) return cluster
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def show_overview(self) -> None:\n pri...
[ "0.7719436", "0.74022055", "0.73409766", "0.7059982", "0.6924038", "0.6866448", "0.67935354", "0.67794836", "0.6743119", "0.6643425", "0.66368085", "0.6614722", "0.6586277", "0.6576589", "0.6573444", "0.6546255", "0.65434664", "0.651949", "0.6503765", "0.64962125", "0.6486102...
0.0
-1
Gather cluster health information
def get_health_info(handle, timeout): health = dict() health['stat'] = ceph_mon_command(handle, 'health' , timeout) # TODO command not known with ceph_mon_command #health['detail'] = ceph_mon_command(handle, 'health detail', timeout) health['detail'] = shell_command('ceph health detail') + b'\n' health['df'] = ceph_mon_command(handle, 'df' , timeout) health['report'] = ceph_mon_command(handle, 'report' , timeout) return health
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cluster_health(self, host):\n\n h = self.call_to_cluster(host, '/_cluster/health')\n\n data = {\n 'number_of_nodes': h['number_of_nodes'],\n 'unassigned_shards': h['unassigned_shards'],\n 'timed_out': h['timed_out'],\n 'active_primary_shards': h['active...
[ "0.8006035", "0.7863848", "0.71975887", "0.700192", "0.6814766", "0.6722469", "0.6707405", "0.66621304", "0.665207", "0.65768224", "0.65568644", "0.6537222", "0.6509707", "0.6436703", "0.6426991", "0.64085305", "0.6380572", "0.6379265", "0.63764757", "0.63066256", "0.62769216...
0.68823427
4
Gather ceph monitor information
def get_monitor_info(handle, timeout): mon_info = dict() mon_info['stat'] = ceph_mon_command(handle, 'mon stat' , timeout) mon_info['dump'] = ceph_mon_command(handle, 'mon dump' , timeout) mon_info['map'] = ceph_mon_command(handle, 'mon getmap' , timeout) mon_info['metadata'] = ceph_mon_command(handle, 'mon metadata', timeout) return mon_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_monitor_data(self):\n json = await self._api_call(\"app/monitors/%s/overview\" % self.sense_monitor_id)\n if \"monitor_overview\" in json and \"monitor\" in json[\"monitor_overview\"]:\n self._monitor = json[\"monitor_overview\"][\"monitor\"]\n return self._monitor", ...
[ "0.66455966", "0.6420232", "0.64127403", "0.6299886", "0.6260533", "0.6227388", "0.6212769", "0.6193962", "0.6172191", "0.6142712", "0.60478276", "0.6043625", "0.60359365", "0.5971578", "0.5969627", "0.5957598", "0.5948469", "0.5928224", "0.5919851", "0.5902216", "0.58823013"...
0.7485646
0
GAther ceph device information
def get_device_info(handle, timeout): device_info = dict() device_info['ls'] = ceph_mon_command(handle, 'device ls', timeout) return device_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device_info(self) -> Dict[str, Any]:\n via_device = 'meter_adapter'\n if self.toon.gas.is_smart:\n via_device = 'electricity'\n\n return {\n 'name': 'Gas Meter',\n 'identifiers': {\n (DOMAIN, self.toon.agreement.id, 'gas'),\n },\n ...
[ "0.6951333", "0.6848414", "0.6711916", "0.6700383", "0.667151", "0.66525185", "0.6542051", "0.6531767", "0.6522371", "0.6514975", "0.64998937", "0.64957225", "0.6470811", "0.6451061", "0.6438751", "0.6416084", "0.64145595", "0.6406177", "0.63985395", "0.63964", "0.63928473", ...
0.6898668
1
Gather ceph manager information
def get_manager_info(handle, timeout): mgr_info = dict() mgr_info['ls-modules'] = ceph_mon_command(handle, 'mgr module ls', timeout) mgr_info['dump'] = ceph_mon_command(handle, 'mgr dump' , timeout) mgr_info['metadata'] = ceph_mon_command(handle, 'mgr metadata' , timeout) return mgr_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def manager_info(self, manager):\n _, body = self.request('/v1.1/managers/active/%s' % manager, 'GET')\n return body", "def get_ceph_info(handle, ceph_config, timeout):\n cluster = dict()\n\n cluster['status'] = ceph_mon_command(handle,\n 'status', time...
[ "0.65334594", "0.6489982", "0.6204253", "0.60092634", "0.6008082", "0.5893398", "0.5754568", "0.5699375", "0.56716347", "0.5611492", "0.5582611", "0.5579014", "0.5577621", "0.5576179", "0.55696046", "0.5542833", "0.55382943", "0.55256027", "0.5521256", "0.55209965", "0.551072...
0.7890347
0
Writes the diagnostics to specific files and creates a tarball for the same
def dict_to_files(result_dict, dest_dir): tempdir = tempfile.mkdtemp() # timestamp every generated dignostic file timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%I%S") tarball = '{0}/ceph-collect_{1}.tar.gz'.format(dest_dir, timestamp) with tarfile.open(tarball, 'w:gz') as tar: for filename, content in result_dict.items(): for contentname, contentdata in content.items(): tmpfile = '{0}/{1}'.format(tempdir, filename + "-" + contentname) LOGGER.debug('Writing file %s', tmpfile) print('Writing file %s', tmpfile) with open(tmpfile, 'wb') as f: f.write(contentdata) f.close() tar.add(name=tmpfile, arcname='ceph-collect_{0}/{1}'.format(timestamp, filename + "-" + contentname)) tar.close() LOGGER.info("Diagnostics are written to : "+ tarball) LOGGER.info("Cleaning up temporary directory") shutil.rmtree(tempdir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_tarball(args, tarfilename, archivefiles=[]):\n if not archivefiles:\n return None\n \n manifest_filename, manifest_uuid = render_manifest(args, archivefiles)\n try:\n with tarfile.open(tarfilename, f\"{FILE_FLAG}:gz\") as tarball:\n file_count = 0\n for fna...
[ "0.6348986", "0.62728393", "0.6065827", "0.6017353", "0.6005254", "0.6003141", "0.5832814", "0.57835436", "0.5780004", "0.57281613", "0.57143897", "0.57073367", "0.5691607", "0.566522", "0.55969507", "0.55673474", "0.5560966", "0.55485785", "0.5524467", "0.5479293", "0.546824...
0.53366596
38
Ensure this function returns the correct number of entities with the specified tag.
def test_get_sets_by_category(): group_categories = get_sets_by_category(mb, "Group") assert len(group_categories) == 5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_count(self, tag: Text) -> int:\r\n sub_tags = tag.split(\"+\")\r\n return len([e for e in self.elements if all(t in e.tags for t in sub_tags)])", "def getTagsNum(self):\r\n self.gettags()", "def count_tags():\r\n trans = transaction.begin()\r\n StatBookmarkMgr.count_total_tag...
[ "0.70608175", "0.678857", "0.6734028", "0.65930325", "0.64529455", "0.62508005", "0.6231032", "0.62118965", "0.61979675", "0.6197728", "0.6096715", "0.6096469", "0.6078573", "0.59946", "0.59408474", "0.5923345", "0.59161204", "0.590068", "0.5879492", "0.5843947", "0.58285606"...
0.0
-1
Ensure this function returns the correct list of entity sets without the graveyard volume.
def test_locate_graveyard(): groups_to_write, graveyard_sets = locate_graveyard(mb) assert groups_to_write == [12682136550675318125, 12682136550675318126, 12682136550675318128, 12682136550675318129]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def available_sets(session, player):\n excluded_sets = set(session.taken.keys())\n for grouping in session.exclusives:\n if player.sets.intersection(grouping):\n excluded_sets.update(grouping)\n return [s for s in session.sets if s not in excluded_sets]", "def gather_entities(self):\n ...
[ "0.6598351", "0.6436746", "0.6417409", "0.6331896", "0.6149638", "0.604962", "0.5946124", "0.58908427", "0.5782512", "0.57297885", "0.571627", "0.55856144", "0.55809915", "0.5561783", "0.5537913", "0.5536738", "0.5535733", "0.54536754", "0.5453462", "0.5442003", "0.53676707",...
0.0
-1
Ensure this function returns the correct default output file name.
def test_default_format_file_name(): output_name = format_file_name(test_file) assert (output_name == test_file[:-4] + "_no_grave.h5m") == True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_file_name(self) -> typing.Optional[str]:\n return self._values.get(\"output_file_name\")", "def output_file_name(self) -> typing.Optional[str]:\n return self._values.get(\"output_file_name\")", "def output_file_name(self) -> typing.Optional[str]:\n return self._values.get(\"outp...
[ "0.7893717", "0.7893717", "0.7893717", "0.7893717", "0.7785941", "0.77704704", "0.7656715", "0.7550296", "0.7494922", "0.7245793", "0.71426344", "0.7105859", "0.707106", "0.70194244", "0.7014153", "0.6989266", "0.69176805", "0.69050795", "0.6865408", "0.68430066", "0.6822271"...
0.69034684
18
Ensure this function returns the correct user input file name.
def test_option_format_file_name(): output_name = format_file_name(test_file, 'test_output.h5m') assert (output_name == 'test_output.h5m') == True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_path():\n file_name = input(\"Enter the file name:\")\n return file_name", "def obtain_filename():\n file_wanted = input(\"Filename? \")\n return file_wanted", "def file_name_request(self):\n self.file_name = input(\"What is the name of the input file?\\n>>>\")", "def get_filename...
[ "0.8286607", "0.8252012", "0.80501723", "0.76167345", "0.7578909", "0.74168056", "0.73490226", "0.7260431", "0.72341657", "0.723208", "0.7181744", "0.71346575", "0.70963013", "0.7040597", "0.70244354", "0.7022156", "0.6870741", "0.68621856", "0.68099684", "0.6801889", "0.6728...
0.0
-1
Ensure that graveyard_removal.py correctly removes the graveyard from an h5m file.
def test_default_graveyard_removal(): os.system("python svalinn_tools/graveyard_removal.py " + test_file_path + test_file) size = os.path.getsize(test_file[:-4] + "_no_grave.h5m") assert size == 5748780
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cleanup():\n os.remove(test_file[:-4] + \"_no_grave.h5m\")", "def remove_group(self):\n try:\n with open_hdf5(self.file_name, mode=\"a\") as hdf_file:\n del hdf_file[self.h5_path]\n except KeyError:\n pass", "def cleanup_file(name: str):\n if os...
[ "0.64098233", "0.6114747", "0.58413917", "0.582247", "0.57496554", "0.56727177", "0.5573327", "0.5490028", "0.54383737", "0.53650856", "0.53048605", "0.5302251", "0.5285933", "0.5256595", "0.52538306", "0.5244757", "0.5230618", "0.5228572", "0.5218584", "0.5204441", "0.516990...
0.7458633
0
Ensure that graveyard_removal.py prints the correct entity handle for the graveyard volume.
def test_print_graveyard_removal(capfd): os.system("python svalinn_tools/graveyard_removal.py " + test_file_path + test_file + " -p") out, err = capfd.readouterr() assert ("12682136550675318127" in out) == True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n ...
[ "0.5670078", "0.5624108", "0.554459", "0.5481827", "0.5419034", "0.53412575", "0.5311965", "0.52677137", "0.518722", "0.51854277", "0.5156141", "0.5146773", "0.5126611", "0.5102824", "0.50757784", "0.5075659", "0.5067923", "0.50451326", "0.49824572", "0.49812868", "0.4970936"...
0.6111419
0
Remove the files written to disk by this class of tests.
def test_cleanup(): os.remove(test_file[:-4] + "_no_grave.h5m")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tearDown(self):\n for f in os.listdir('/tmp'):\n if not f.startswith(self.FILE_PREFIX):\n continue\n\n os.remove(os.path.join('/tmp', f))", "def tearDown(self) -> None:\n filtered = [f for f in glob.glob('steps/tests/test_output/*') if not re.match(r'\\.keep...
[ "0.8038646", "0.80053955", "0.79976135", "0.7974729", "0.7966363", "0.78566366", "0.7773938", "0.7761133", "0.77407455", "0.7704826", "0.76752174", "0.76329154", "0.76329154", "0.76329154", "0.7632071", "0.7624899", "0.761122", "0.76048136", "0.7599777", "0.7578465", "0.75228...
0.0
-1
Validate the regex patterns, but only partially while the user is still typing. Because the 'from' pattern will be where the user specifies captures, changing it also requires revalidating the substitution pattern. However if the user is still typing (as opposed to hitting enter to complete the input) we do the minimal amount of work necessary, i.e we just set the colors back to neutral and disable the Apply button.
def validateRegexFields(self, complete=False): # Assume the patterns aren't valid. self.m_validFromRe = False self.m_validPatterns = False ### Validate the 'from' pattern # regexCtl = self.m_reFromCtl subsCtl = self.m_reToCtl regex, subs = regexCtl.Value, subsCtl.Value regColor, subColor = wx.NullColour, wx.NullColour if complete and regex: regColor = subColor = wx.BLUE try: re.sub(regex, subs, '') except re.error as e: subColor = wx.RED try: re.compile(regex) except re.error as e: regColor = wx.RED else: self.m_validFromRe = True else: self.m_validFromRe = True self.m_validPatterns = bool(subs) self.setTextColor(regexCtl, regColor) self.setTextColor(subsCtl, subColor) if complete: self.populateFileList() else: self.m_applyBtn.Enabled = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def onTextChange(self, event):\n\n self.validateRegexFields(complete=False)\n event.Skip()", "def onHitEnterInFrom(self, event):\n\n self.validateRegexFields(complete=True)\n if self.m_validFromRe:\n self.m_reToCtl.SetFocus()", "def __checkForPattern(self):\n if se...
[ "0.6140925", "0.6094179", "0.60423654", "0.56661874", "0.5438786", "0.53020585", "0.5268809", "0.51951706", "0.5172561", "0.50293213", "0.501354", "0.49975908", "0.49453467", "0.49100548", "0.49090192", "0.48892468", "0.48374686", "0.48184943", "0.47469115", "0.46745083", "0....
0.7344064
0
Refresh our list of what's on disk.
def updateDiskFileList(self): if self.m_curPath: # Get me just the files please. for _, _, files in os.walk(self.m_curPath): break else: files = [] files.sort() if files != self.m_diskNames: self.m_diskNames[:] = files self.m_newNames[:] = [] self.populateFileList()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh(self):\n self.config.read(self.filename)\n self.loadRecentFiles()", "def update(self):\n if os.path.isdir(self.full_path):\n self.file_list = os.listdir(self.full_path)\n else:\n self.file_list = []", "def refresh(self, list_of_tables):\n sel...
[ "0.71091926", "0.7044655", "0.700236", "0.68334323", "0.68334323", "0.6765857", "0.66914636", "0.66914636", "0.66914636", "0.65789855", "0.65704066", "0.65704066", "0.6506549", "0.6506549", "0.6505286", "0.6480908", "0.6439831", "0.6422957", "0.64214545", "0.64141774", "0.632...
0.70745224
1
Uses the list of filesondisk and the regex patterns to build a list of what the directory will look like if we renamed the files. Because we're justusing a simple text list, we use symbols to show the user which filenames would change and whether they would produce any duplicates, substituting "." with "\1.txt".
def populateFileList(self): self.m_fileList.SetForegroundColour(wx.NullColour) # We'll need to track which file names are modified and which # file names duped. applicable, dupes = set(), set() if not self.m_validPatterns: # Regex's don't compile yet, just use the raw filename list. newNames = self.m_diskNames else: # Apply the substitution to the filename list to produce a # destination-name list, and identify whether the patterns # actually affect anything. # newNames, modifiedIndexes = [], [] matcher = re.compile(self.m_reFromCtl.Value).subn subs = self.m_reToCtl.Value for filename in self.m_diskNames: # Perform the sub (filename, numChanges) = matcher(subs, filename) # Was there a modification? if numChanges: # Record the affected name. applicable.add(filename) if filename in newNames: dupes.add(filename) # Add to the primary list newNames.append(filename) # Does this produce a different list than we already had? If so, # clear the file list and replace it with the new one. # if newNames != self.m_newNames: self.m_fileList.Clear() # Figure out the longest name so we can create a cleanly-formatted # set of prefix/suffix characters for the modified/duped annotation. # maxLen = max(map(len, newNames)) decorate = '{m} {fn:<{ml}} {m}'.format # Now build a list of display elements. for filename in newNames: mark = ' ' if filename not in applicable else '|' if filename in dupes: mark = '*' self.m_fileList.Append(decorate(m=mark, fn=filename, ml=maxLen)) # Keep the list. self.m_newNames[:] = newNames # Update the apply button, we only want it enabled when the user # has a valid set of patterns that affect any files and have no # dupes produced as a result. # self.m_applyBtn.Enabled = bool(applicable) and not dupes if dupes: # Emphasize the presence of dupes. self.m_fileList.SetForegroundColour(wx.RED) # Draw the list. self.m_fileList.Refresh()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rename_files(file_list, src_dir, pattern, rename=False):\n i = 0\n renamed = regex_group_split(file_list, pattern, False)\n renamed_w_path = [src_dir + fn for fn in renamed]\n orig_fp_list = orig_filepath_list(file_list, src_dir)\n\n for filename in file_list:\n if not (orig_fp_list[i] ==...
[ "0.62912023", "0.6283787", "0.61243826", "0.61238414", "0.6057", "0.6056427", "0.60261464", "0.6002019", "0.58697075", "0.58516043", "0.58399916", "0.5831643", "0.5815127", "0.5750396", "0.57445604", "0.57320064", "0.5677666", "0.56666535", "0.56408477", "0.5636326", "0.56356...
0.6573242
0
Handle the user changing directory.
def onDirectorySelectionChanged(self, event): newPath = self.m_directoryCtl.Path if self.m_curPath == newPath: return self.m_applyBtn.Disable() self.m_directoryCtl.ExpandPath(newPath) # Clear the directory list. self.m_fileList.Clear() self.m_curPath = newPath self.updateDiskFileList()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ChangeDir(self, path: str) -> None:\n ...", "def change_base_dir():\n global base_dir\n while True:\n new_base = raw_input(\"New user directory? \")\n new_base = os.path.abspath(new_base)\n if os.path.exists(new_base):\n if os.path.isfile(new_base):\n ...
[ "0.71233064", "0.688778", "0.67881495", "0.6747246", "0.6743474", "0.6619424", "0.6606413", "0.6544958", "0.6513183", "0.6458206", "0.64284754", "0.6231321", "0.62166995", "0.6192592", "0.6178349", "0.6172817", "0.6166601", "0.6157614", "0.6141804", "0.6134251", "0.6129112", ...
0.57040215
45
User has clicked the Apply button.
def onApply(self, event): # Rename all of the files based on the substitution. for (old, new) in zip(self.m_diskNames, self.m_newNames): if old != new: old = os.path.join(self.m_curPath, old) new = os.path.join(self.m_curPath, new) try: os.rename(old, new) except OSError: pass # Now we out the lists so that what the user sees after this # reflects what's on disk. self.m_diskNames[:] = [] self.m_newNames[:] = [] # Update. self.updateDiskFileList()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dlg_apply(self):\n pass # override", "def on_okButton_clicked(self):\n self.accept=True", "def apply(event: EventType, widget: WidgetType) -> bool:\n return event.key == KEY_APPLY", "def on_apply_clicked(self,button):\n\t\tdialog = ConfirmPerformActions()\n\t\t\n\t\tresponse = dialog...
[ "0.6905215", "0.68310213", "0.66178197", "0.64573747", "0.6415029", "0.63940597", "0.63610774", "0.62184435", "0.61905", "0.6176993", "0.6145998", "0.61428314", "0.60933006", "0.6080761", "0.6052281", "0.59710234", "0.59425336", "0.5938652", "0.5905501", "0.5880406", "0.58425...
0.0
-1
When the user hits 'enter' in the 'from' field.
def onHitEnterInFrom(self, event): self.validateRegexFields(complete=True) if self.m_validFromRe: self.m_reToCtl.SetFocus()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_enter():\n enter_event = QtGui.QKeyEvent(\n QEvent.KeyPress, Qt.Key_Enter, Qt.KeyboardModifiers())\n QtGui.QApplication.sendEvent(self, enter_event)", "def hit_enter():\n keyboard.press_and_release('Enter')", "def enter(self):\n\t\tself.actionObject().key_...
[ "0.6778405", "0.6758216", "0.67262506", "0.6404956", "0.6386746", "0.63825494", "0.63070136", "0.62395203", "0.6209298", "0.62009835", "0.61862344", "0.61658704", "0.616317", "0.6027476", "0.6005713", "0.5913", "0.5905714", "0.58879393", "0.58879393", "0.58485764", "0.5829572...
0.7331321
0
When the user hits 'enter' in the substitution field.
def onHitEnterInTo(self, event): self.validateRegexFields(complete=True) if self.m_validPatterns: self.m_fileList.SetFocus()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hit_enter():\n keyboard.press_and_release('Enter')", "def enter(self):\n\t\tself.actionObject().key_down(Keys.ENTER).key_up(Keys.ENTER).perform()", "def press_enter():\n raw_input(\"\\n\\nPress Enter\")", "def handle_enter():\n enter_event = QtGui.QKeyEvent(\n QEvent.K...
[ "0.7329176", "0.6851118", "0.683353", "0.6715554", "0.66356564", "0.6603124", "0.6601415", "0.6570257", "0.6491624", "0.6442124", "0.638937", "0.62842214", "0.62780654", "0.6265659", "0.6146514", "0.6146514", "0.61026675", "0.60870814", "0.60781586", "0.6066289", "0.6063572",...
0.5933596
26
When the user modifies the content of either regex field.
def onTextChange(self, event): self.validateRegexFields(complete=False) event.Skip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_field(self, **kwargs):\n if self.regex:\n if not 'regex' in self.field_args:\n self.field_args = self.field_args + ('regex', )\n self.field_klass = forms.RegexField\n return super(StringSetting, self).to_field(**kwargs)", "def get_regex_mismatch_error_tex...
[ "0.59722745", "0.58232623", "0.5730046", "0.5724786", "0.5445473", "0.53800493", "0.53737646", "0.53433657", "0.53141534", "0.5252366", "0.52106875", "0.52057683", "0.5202676", "0.515042", "0.51475894", "0.51414645", "0.51303077", "0.5116394", "0.5103767", "0.50991905", "0.50...
0.6521979
0
Assemble the option parser.
def initOpts(): option_list = [ make_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help=""), make_option("-l", "--licenses", action="store", type="string", dest="licenses_xml", help="Use the specified licenses file.", default="licenses.xml"), make_option("-o", "--output", action="store", type="string", dest="output_rdf", help="Write the RDF to the specified file.", default=""), ] usage = "%prog [-v] [-l licenses.xml] [-o output.rdf]" parser = OptionParser(usage=usage, version="%%prog %s" % __version__, option_list = option_list) return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_parser(cls, option_group, args, mkflag):", "def setup_parser(cls, option_group, args, mkflag):", "def build_parser(self, parser: ArgumentParser) -> None:", "def initCmdLineParser():\n\n # Init parser and all general flags\n logging.debug(\"initiating command line option parser\")\n usage =...
[ "0.7635837", "0.7635837", "0.7205048", "0.71621025", "0.7039156", "0.7033386", "0.702205", "0.6990235", "0.69513005", "0.6902962", "0.68967104", "0.6892518", "0.6840839", "0.6837334", "0.68262494", "0.6812211", "0.6810176", "0.6762378", "0.67567515", "0.67565316", "0.6741107"...
0.6769619
17
Run the makerdf script.
def main(): optparser = initOpts() (options, args) = optparser.parse_args() output = StringIO.StringIO() assembleRDF(file(options.licenses_xml), output, options.verbose) if options.output_rdf: file(options.output_rdf, 'w').write(output.getvalue()) else: print output.getvalue()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(): \n # Parse Arguments\n args = parse_arguments()\n\n # Print outdir\n print(\"Writing output to \" + args.outdir)\n\n # Print start statement\n print('Starting script for ' + args.file + ' at ' + str(datetime.datetime.now()), flush=True)\n\n # Put all the files in a function that w...
[ "0.67287403", "0.63006973", "0.6264529", "0.6237319", "0.6058982", "0.60356236", "0.59877056", "0.59641856", "0.59610593", "0.59463936", "0.5924916", "0.59105444", "0.59030676", "0.5876134", "0.585013", "0.582454", "0.58127964", "0.5797604", "0.5779497", "0.5763903", "0.57620...
0.0
-1
Returns the list of log paths that are not known by the log configuration associated to this instance
def get_new_logs(log_paths,log_conf): if log_conf is None or log_conf.get_host() is None: return log_paths conf_logs = log_conf.get_host().get_logs() new_logs = [log_path for log_path in log_paths if log_path not in conf_logs] print 'New logs detected on %s: %s'(log_conf.get_host().get_name(), new_logs) logger.info('New logs detected on %s: %s',log_conf.get_host().get_name(), new_logs) return new_logs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_paths(self): # pylint:disable=function-redefined\n return self._log_paths", "def all_logs(self):\n return os.listdir(LOGS_BASE_PATH)", "def get_file_list_without_current_log():\n full_list = sorted(filter(os.path.isfile, os.listdir('.')), key=os.path.getmtime)\n full_list.remove(\"...
[ "0.8348011", "0.6821954", "0.6548825", "0.64387006", "0.6341025", "0.6129678", "0.6074887", "0.6059957", "0.605284", "0.605284", "0.60285383", "0.60223985", "0.5995747", "0.5980469", "0.59733", "0.5953644", "0.59462976", "0.5943091", "0.59387076", "0.5936064", "0.59292436", ...
0.60559905
8
Returns the remote logging configuration or None if the remote configuration does not exist.
def get_instance_log_conf(instance_id): # Retrieve current log config file log_conf_file = None filename = 'logentries_%s.conf'%instance_id rsyslog_conf_name = '/etc/rsyslog.d/%s'%filename local_conf_name = '/tmp/%s'%filename # Clean file present try: local('rm %s'%local_conf_name) except: print 'Could not remove %s. It may not exist'%(local_conf_name) logger.warning('Could not remove %s. It may not exist'%(local_conf_name)) # Get remote conf file or return None if it cannot be retrieved try: get(rsyslog_conf_name,local_conf_name) except: print '%s does not exist on instance %s'%(rsyslog_conf_name,instance_id) logger.warning('%s does not exist on instance %s',rsyslog_conf_name,instance_id) return None # Open conf file or return None if it cannot be opened try: log_conf_file = open(local_conf_name,'r') except: print 'Cannot open %s from instance %s'%(local_conf_name,instance_id) logger.warning('Cannot open %s from instance %s',local_conf_name,instance_id) return None return log_conf_file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logging_config(self) -> 'outputs.LoggingConfigResponse':\n return pulumi.get(self, \"logging_config\")", "def logging_config(self) -> Optional['outputs.FeatureSpecFleetobservabilityLoggingConfig']:\n return pulumi.get(self, \"logging_config\")", "def logging_config(self) -> Optional[pulumi.In...
[ "0.68369645", "0.6830208", "0.67188877", "0.63710445", "0.62929714", "0.62837815", "0.62123984", "0.61937004", "0.59980434", "0.58134604", "0.5746437", "0.57139", "0.5709636", "0.56954515", "0.5683042", "0.5662481", "0.5662481", "0.564444", "0.5639923", "0.5633198", "0.563116...
0.5990378
9
Returns the updated log_conf, taking into account new log files present on the instance as well as modifications made to the corresponding logentries host.
def update_instance_conf(log_paths, log_conf): log_client = LogClient.Client(account_key) instance_id, config = get_ssh_config(env.host) if log_conf is None and len(log_paths)>0: print 'log_conf is None' log_conf = create_host_logs(log_client,instance_id,log_paths) elif log_conf is not None: print 'log_conf is not None' conf_host = log_conf.get_host() if conf_host is None: print 'Error. This instance configuration is missing the corresponding model!! instance_id=%s'%instance_id logger.error('Error. This instance configuration is missing the corresponding model!! instance_id=%s',instance_id) log_conf = create_host_logs(log_client,instance_id,log_paths) return log_conf if conf_host.get_key() is None: print 'Host %s has an logentries-rsyslog config file but no account key!!'%host.get_name() logger.warning('Host %s has an logentries-rsyslog config file but no account key!!',host.get_name()) log_conf = create_host_logs(log_client,instance_id,log_paths) return log_conf account = log_client.get_account() matching_host = None for host in account.get_hosts(): if host.get_key() == conf_host.get_key(): matching_host = host break # If there is no matching host, then it is assumed that it was deleted from Logentries and that no configuration should be associated to this instance. if matching_host is None: log_conf = create_host_logs(log_client,instance_id,log_paths) return log_conf for new_log in get_new_logs(log_paths, log_conf): # Update matching host so that each new log becomes part of it. matching_host = log_client.create_log_token(host=matching_host,log_name=new_log) log_conf.set_host(matching_host) return log_conf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_instance_log_conf(instance_id):\n # Retrieve current log config file\n log_conf_file = None\n\n filename = 'logentries_%s.conf'%instance_id\n rsyslog_conf_name = '/etc/rsyslog.d/%s'%filename\n local_conf_name = '/tmp/%s'%filename\n \n # Clean file present\n try:\n local('rm %...
[ "0.7078796", "0.6454262", "0.6153468", "0.609547", "0.6065723", "0.6051836", "0.60207915", "0.6019159", "0.59815073", "0.59241617", "0.5814336", "0.58045876", "0.5802499", "0.5684129", "0.56608063", "0.5597701", "0.5580219", "0.55438983", "0.55438983", "0.54896456", "0.542672...
0.77249867
0
Do lots of magic to make alembic work programmatically from the CLI.
def get_alembic_config(database_url: str) -> Config: migrations_dir = os.path.dirname(os.path.abspath(__file__)) directory = os.path.join(migrations_dir, "migrations") config = Config(os.path.join(migrations_dir, "alembic.ini")) config.set_main_option("script_location", directory.replace("%", "%%")) config.set_main_option("sqlalchemy.url", database_url) return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(arguments):\n migration = Migration(arguments)\n return migration.run()", "def current():\n alembic_command.current(alembic_config, verbose=True)", "def _init_db():\n import alembic.config\n import alembic.command\n alembic_cfg = alembic.config.Config('alembic.ini')\n alembic_cfg....
[ "0.64835244", "0.6423896", "0.63449913", "0.6249926", "0.61865556", "0.618295", "0.6082049", "0.5999133", "0.59890723", "0.5959365", "0.5921134", "0.58983696", "0.5896638", "0.5896638", "0.5881768", "0.58784425", "0.58784425", "0.5840882", "0.58390534", "0.5833645", "0.583052...
0.0
-1
Runs the migrations and creates all of the database objects.
def init_db(database_url: str, fidesctl_config: FidesctlConfig) -> None: alembic_config = get_alembic_config(database_url) upgrade_db(alembic_config) load_default_taxonomy(fidesctl_config)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_db():\n database.db.create_all()\n get_ulm()\n for fixture_file in glob.glob(config.DevelopmentConfig.FIXTURES_DIRS + '/*.json'):\n fixtures = JSONLoader().load(fixture_file)\n load_fixtures(database.db, fixtures)\n MigrationManager().stamp_db()", "def db_create_all(self)...
[ "0.74945307", "0.74615896", "0.7393201", "0.7345915", "0.7331787", "0.73113805", "0.72740376", "0.7251851", "0.71819717", "0.7157426", "0.71482134", "0.7122612", "0.70904714", "0.7079735", "0.7049788", "0.70419806", "0.7040114", "0.70348716", "0.7018809", "0.70154107", "0.699...
0.0
-1
Drops all tables/metadata from the database.
def reset_db(database_url: str) -> None: engine = get_db_engine(database_url) connection = engine.connect() SqlAlchemyBase.metadata.drop_all(connection) migration_context = MigrationContext.configure(connection) version = migration_context._version # pylint: disable=protected-access if version.exists(connection): version.drop(connection)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_db():\n db = get_db()\n tables = db.tables\n for table in tables:\n db[table].drop()", "def tear_down():\n db.flush()\n for table in metadata.tables.values():\n db.execute(table.delete())", "def drop_tables() -> None:\n print(\"Dropping database tables usin...
[ "0.84615856", "0.8291717", "0.81522286", "0.81405354", "0.8026864", "0.80001897", "0.79949796", "0.79896367", "0.79855984", "0.7966979", "0.7918987", "0.7878273", "0.78614014", "0.7860039", "0.7847825", "0.7836062", "0.78076893", "0.77844214", "0.7772575", "0.77703017", "0.77...
0.0
-1
Epsilon is used here to avoid conditional code for checking that neither P nor Q is equal to 0.
def KL(P,Q): epsilon = 0.00001 #You may want to instead make copies to avoid changing the np arrays. P = P+epsilon Q = Q+epsilon divergence = np.sum(P*np.log(P/Q)) return divergence
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def epsilon():\n return _EPSILON", "def get_initial_epsilon(self):\n return self.epsilon_percentile, True, self.max_rounds == 0", "def epsilon(self):\n return self.__epsilon", "def epsilon(self):\n return self._epsilon", "def MccEpsilon(self):\n if getattr(self, '_MccEpsilon', ...
[ "0.71985364", "0.6888928", "0.6771035", "0.6715826", "0.6521258", "0.64338565", "0.63468957", "0.63350797", "0.6333442", "0.6308612", "0.62801576", "0.6250783", "0.62459266", "0.6223243", "0.6196995", "0.6183421", "0.61769325", "0.6173163", "0.61617094", "0.61539805", "0.6140...
0.0
-1
Returns the item with the highest valued key
def pop(self): if len(self.heap)==0: raise ValueError("Tried popping empty heap") return heapq.heappop(self.heap)[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_key(self):\n return self._price_list[-1]", "def key_of_max(d):\n keys = list(d.keys())\n keys.sort()\n return max(keys, key=lambda x: d[x])", "def keywithmaxval(d):\n\treturn max(d, key=lambda k: d[k])", "def max_key (dict):\n output = -1\n for key, value in dict.items()...
[ "0.77251846", "0.74479496", "0.7443115", "0.728091", "0.7273358", "0.72690016", "0.7162577", "0.7084611", "0.706453", "0.702705", "0.69870156", "0.69063205", "0.69063205", "0.69063205", "0.6894217", "0.68838924", "0.687145", "0.68680716", "0.6850627", "0.67813027", "0.6753813...
0.0
-1
Given a normal feature matrix, creates a transpose feature matrix, a list of discrete features, and a list of nonempty features for each id. If the dataset is dense, this uses a Numpy matrix to save on space. Otherwise, it uses a listofdicts structure.
def __init__(self,db): self._numFeatures = len(db.keys) self._numEntries = len(db.entries) numMissing = 0 if isinstance(db.entries[0],dict): #already sparse database given as input self.featureMatrix = None self.featureDicts = [{} for i in range(self._numFeatures)] self.discreteFeature = [True]*self._numFeatures for i in xrange(self._numFeatures): for j in xrange(self._numEntries): if i in db.entries[j]: v = db.entries[j][i] if v != int(v): self.discreteFeature[i] = False break self.entryLists = [[] for i in range(self._numFeatures)] self.featureSets = [] for i in xrange(self._numEntries): flist = [] for j in xrange(self._numFeatures): if j in db.entries[i]: flist.append(j) self.entryLists[j].append(i) self.featureDicts[j][i] = db.entries[i][j] else: numMissing += 1 self.featureSets.append(set(flist)) else: featureMatrix = np.array(db.entries,dtype=np.float_) self.featureMatrix = np.asfortranarray(featureMatrix).T self.featureDicts = [{} for i in range(self._numFeatures)] self.discreteFeature = [] for i in xrange(self.featureMatrix.shape[0]): self.discreteFeature.append(not any(v != int(v) for v in self.featureMatrix[i,:] if not np.isnan(v))) self.entryLists = [[] for i in range(self._numFeatures)] self.featureSets = [] for i in xrange(self._numEntries): flist = [] for j in xrange(self._numFeatures): if not np.isnan(featureMatrix[i,j]): flist.append(j) self.entryLists[j].append(i) self.featureDicts[j][i] = featureMatrix[i,j] else: numMissing += 1 self.featureSets.append(set(flist)) if numMissing == 0: self.featureSets = None self.featureDicts = None else: self.featureMatrix = None self.sparsity = float(numMissing) / (self._numFeatures*self._numEntries)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense(), sparse_to_tuple(features)", "def preprocess_features(fea...
[ "0.5814003", "0.5814003", "0.5798338", "0.5759967", "0.5759967", "0.5759362", "0.5754472", "0.56665456", "0.56545", "0.5584708", "0.5582025", "0.5555118", "0.5517428", "0.551733", "0.5496943", "0.54869235", "0.5478383", "0.5463325", "0.5463325", "0.54537517", "0.5449544", "...
0.0
-1
Given a dict, returns the key that has maximum value (arg max)
def argmax(table): return max((v,k) for k,v in table.iteritems())[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_max_key(dico):\n our_max = 0\n argmax = None\n for key, val in dico.items():\n if val > our_max:\n argmax = key\n our_max = val\n return argmax", "def max_key (dict):\n output = -1\n for key, value in dict.items():\n output = max(output, k...
[ "0.86044014", "0.85811913", "0.85383964", "0.8332157", "0.8297145", "0.82476956", "0.8214348", "0.8207442", "0.8156642", "0.81523865", "0.8088131", "0.8088131", "0.8088131", "0.80344087", "0.7471879", "0.7465998", "0.74641794", "0.74351037", "0.7350328", "0.73464257", "0.7231...
0.7673297
14
Given a distribution, given by the list p_list, returns the entropy of the distribution.
def entropy(p_list): assert len(p_list) > 0 E = 0.0 for p in p_list: if p == 0.0: continue E += p*math.log(p) return E
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entropy(l):\n\n probabilities = np.bincount(l) / len(l)\n with np.errstate(divide='ignore'): # ignore log(0) errors, we'll handle\n log_probabilities = np.log2(probabilities)\n log_probabilities[~np.isfinite(log_probabilities)] = 0\n return -np.sum(probabilities * log_probabilities)", ...
[ "0.75446194", "0.73901826", "0.7331276", "0.7134225", "0.70520824", "0.7050444", "0.69640076", "0.6912087", "0.68127155", "0.68039405", "0.6800982", "0.6751854", "0.6749378", "0.6710503", "0.6699265", "0.6682273", "0.6634976", "0.6633226", "0.6585759", "0.6569177", "0.6564183...
0.83441865
0
For a list of dictionaries mapping values to counts, returns a cost used for DT splitting that is optimal at 0. Currently uses the negative of information gain.
def split_cost(label_count_list): return -split_information_gain(label_count_list) #this cost value is the misclassification error. return split_misclassification_error(label_count_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cost(foods, foods_used):\n cost = 0.00\n for i, count in foods_used.items():\n cost += (foods[i]['serving_cost'] * count)\n return cost", "def weighted_score(counters, lst, weight):\n if counters == None:\n counters = {}\n\n\n for item in lst:\n if item in counters:\n ...
[ "0.58399343", "0.5823463", "0.5779527", "0.55471617", "0.5469961", "0.5425537", "0.53996956", "0.5383586", "0.533916", "0.5299643", "0.52919436", "0.5257763", "0.52473646", "0.52160746", "0.52141124", "0.52082145", "0.51415884", "0.5134601", "0.5126979", "0.5097489", "0.50955...
0.6280166
0
Given a list of values and associated labels, optimizes the best split threshold z where dividing the values into z has the lowest split cost. Returns a pair (z,cost) where cost is the split_cost of the threshold z. If nonelabels is given, this indicates the labels of missing values that must be passed down to both subtrees. This does not affect the output z but it does affect the output cost value.
def best_split(values,labels,nonelabels=None): assert len(values) >= 2 assert len(values) == len(labels) N = len(values) ilist = sorted((v,l) for (v,l) in zip(values,labels)) leftcount = defaultdict(int) rightcount = defaultdict(int) for v,l in ilist: rightcount[l] += 1 bestindex = -1 bestcost = split_cost([leftcount,rightcount]) cost = bestcost #costs = [cost] #print "Split costs:" for i in xrange(len(ilist)): v,l = ilist[i] rightcount[l] -= 1 leftcount[l] += 1 if i+1 >= len(ilist) or v == ilist[i+1][0]: #no splits when v is equal to the next value continue cost = split_cost([leftcount,rightcount]) #print " ",v,leftcount.values(),rightcount.values(),cost #costs.append(cost) if cost < bestcost: bestcost = cost bestindex = i #raw_input() if bestindex < 0: #no split found... try splitting in half splitval = (ilist[0][0]+ilist[-1][0])*0.5 else: splitval = (ilist[bestindex][0] + ilist[bestindex+1][0])*0.5 if nonelabels is None: return (splitval,bestcost) #reevaluate counts leftcount = defaultdict(int) rightcount = defaultdict(int) for l in nonelabels: leftcount[l] += 1 rightcount[l] += 1 for v,l in ilist: if v <= splitval: leftcount[l] += 1 else: rightcount[l] += 1 return splitval,split_cost([leftcount,rightcount])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_cost(label_count_list):\n return -split_information_gain(label_count_list)\n #this cost value is the misclassification error.\n return split_misclassification_error(label_count_list)", "def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n ...
[ "0.6308325", "0.60905", "0.586987", "0.5617052", "0.5437539", "0.5403514", "0.5329129", "0.52851343", "0.52182657", "0.51694614", "0.5163954", "0.5138482", "0.5117913", "0.5107134", "0.5100632", "0.5094282", "0.50677717", "0.50626785", "0.50522244", "0.5046674", "0.50465786",...
0.7924888
0
Normalizes an unnormalized histogram / probability distribution
def normalize(counts): numvals = sum(counts.itervalues()) if numvals <= 0: return counts res = dict() for (k,cnt) in counts.iteritems(): res[k] = float(cnt)/float(numvals) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalise_histogram(histogram):\n total_sum = np.sum(histogram)\n for i in range(len(histogram)):\n histogram[i] /= total_sum\n\n return histogram", "def normalization(distribution):\r\n total_sum = 0\r\n for number in distribution.values():\r\n total_sum += number\r\n \r\...
[ "0.7903847", "0.773003", "0.76129586", "0.73363006", "0.724139", "0.6925099", "0.6923839", "0.6893623", "0.6813233", "0.6747994", "0.6707329", "0.6559963", "0.6552609", "0.65408355", "0.65334505", "0.65331537", "0.6515405", "0.6507021", "0.6436819", "0.6389514", "0.6374892", ...
0.62111866
42
Predicts the label of the given entry. If it contains None elements (missing values), the return value is a probability distribution over possible outcomes (given by a dict).
def predict(self,entry): if self.type == 'v': return self.value v = entry[self.feature] if v is None: #multiple childrens' predictions counts = defaultdict(int) labels = self.predict_all(entry,counts) if len(counts) == 1: return counts.keys()[0] #return a probability distribution return normalize(counts) #maximum likelihood #return argmax(counts) if self.type == 's': c = None try: c = self.children[v] except KeyError: #print "Unseen value for feature",self.feature,": ",v best = None bestDist = float('inf') for (val,c) in self.children.iteritems(): if abs(val - v) < bestDist: bestDist = abs(val - v) best = c c = best return c.predict(entry) elif self.type == 'i': if v <= self.value: return self.children[0].predict(entry) else: return self.children[1].predict(entry) raise RuntimeError("Invalid DecisionTreeNode type?")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, observation):\n\t\t# TODO - complete this\n\t\tp_max = 0\n\t\tpredict = None\n\t\tfor label in self.possible_labels:\n\t\t\tpossiblity = 1\n\t\t\tlabel_gaussian = self.gaussians.get(label)\n\t\t\tfor i in range(len(observation)):\n\t\t\t\t(mean, std) = label_gaussian[0][i]\n\t\t\t\tvalue = observ...
[ "0.6872355", "0.6781801", "0.66207117", "0.64889395", "0.6419287", "0.64013904", "0.63310605", "0.6304575", "0.62747353", "0.62652564", "0.6265103", "0.62375915", "0.61843824", "0.617628", "0.6124116", "0.605304", "0.60273445", "0.60081005", "0.5998837", "0.59917265", "0.5979...
0.65727895
3
Looks up the leaf node corresponding to the given entry. Does not handle missing values.
def lookup(self,entry): if self.type == 'v': return self v = entry[self.feature] assert v != None if self.type == 's': c = None try: c = self.children[v] except KeyError: #print "Unseen value for feature",self.feature,": ",v best = None bestDist = float('inf') for (val,c) in self.children.iteritems(): if abs(val - v) < bestDist: bestDist = abs(val - v) best = c c = best return c.lookup(entry) elif self.type == 'i': if v <= self.value: return self.children[0].lookup(entry) else: return self.children[1].lookup(entry) raise RuntimeError("Invalid DecisionTreeNode type?")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_parent_node(self, entry, key):\r\n parent = entry\r\n keys = key.split(\".\")\r\n for k in keys:\r\n try:\r\n parent = parent[k]\r\n except:\r\n raise Exception(\"key \\\"\" + key + \"\\\" was not found in HAR file\")\r\n retu...
[ "0.6321155", "0.62434244", "0.61522406", "0.6018858", "0.601176", "0.6007655", "0.59877974", "0.5881265", "0.5834447", "0.5798371", "0.57872856", "0.5778088", "0.57707995", "0.5761493", "0.57495886", "0.57285845", "0.57001275", "0.56570214", "0.56385493", "0.5637989", "0.5608...
0.6669498
0
Given a indexed database db, a list of labels (one for each id), and a list of ids to test, sets this node to the best label.
def pick_best_label(self,db,labels,ids): self.type = 'v' if len(labels) > 0: self.value = vote([labels[id] for id in ids]) else: self.value = None return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def learn(self,db,labels):\n self.keys = db.keys[:]\n labelindex = -1\n if isinstance(labels,str):\n labelindex = db.keys.index(labels)\n assert labelindex >= 0,\"label does not exist in database keys\"\n labels = db.get_column(labelindex)\n elif isinsta...
[ "0.68891954", "0.6300339", "0.58898443", "0.5635267", "0.5226151", "0.51870143", "0.51595086", "0.50911933", "0.50677323", "0.49870756", "0.4969618", "0.4949305", "0.4914531", "0.49075586", "0.48753136", "0.48677775", "0.48287308", "0.47792596", "0.47701105", "0.47633627", "0...
0.73168194
0
Given an index database db, a list of labels (one for each id), and a list of ids to train on, computes the optimal split value. It modifies this node to have the optimal split type and value, and then returns the quality of the split as computed by the split_cost function. If features != None, it is a list of available feature indices to use in this split, or a function of 0 arguments that can be called to get a list of features.
def pick_best_split(self,db,labels,ids,features=None): idlabels = [labels[id] for id in ids] if misclassification_error(idlabels) == 0: #base case: no misclassifications self.type = 'v' self.value = idlabels[0] return 0 best = None bestCost = 0 splitval = None discrete = True if features == None: if len(ids) < db.numFeatures(): #look at all present features in the training set features = db.getPresentFeatures(ids) #print len(features),"of",db.numFeatures(),"features selected" else: features = range(db.numFeatures()) elif callable(features): features = features() for i in features: if len(db.entryLists[i]) == 0: continue idiscrete = db.discreteFeature[i] if idiscrete: #count number of labels of a certain value splitter = defaultdict(lambda:defaultdict(int)) #count of labels for missing values nmissing = defaultdict(int) for id in ids: val = db[i,id] if val is None: #missing values go down to all splits nmissing[labels[id]] += 1 continue splitter[val][labels[id]] += 1 if len(splitter) > continuous_variable_threshold: #print "Determined to be a continuous variable" idiscrete = False break if idiscrete: if len(splitter) <= 1: #only a single value continue #count number of missing values in all splits cmax = 0 for k in splitter: for l,v in nmissing.iteritems(): splitter[k][l] += v cmax = max(cmax,sum(splitter[k].values())) #shrink by fraction of (# of ids - largest child)/(# of ids) scale = (1.0-float(cmax)/float(len(ids)))*len(splitter) #evaluate cost cost = split_cost(splitter.values())*scale #print "Split on",i,"information gain",-cost,splitter.values() else: #continuous, need to learn the best split vals = [] presentlabels = [] nonelabels = [] for id in ids: val = db[i,id] if val is None: nonelabels.append(labels[id]) continue vals.append(val) presentlabels.append(labels[id]) if len(vals) <= 1: print "No values for feature",i,"?" 
print vals continue #print "Considering continuous split on",i s,cost = best_split(vals,presentlabels,nonelabels) scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2 cost *= scale #print "Result",s,"Information gain",-cost if cost < bestCost: best = i bestCost = cost discrete = idiscrete if not idiscrete: splitval = s if best is None: self.type = 'v' if len(ids) > 0: self.value = vote(idlabels) return misclassification_error(idlabels) else: self.value = None return 0 else: self.feature = best #discrete or inequality split if discrete: self.type = 's' else: self.type = 'i' self.value = splitval return bestCost
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_split_feature(self, data_set, target_feature, tree_features):\n\n if self.__criterion == 'entropy':\n feature_gains = {feature: self.__gain(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = max(feature_gains, key=feature_gains.get)\n ...
[ "0.59855705", "0.57539624", "0.5735463", "0.57259667", "0.5551452", "0.55483556", "0.5174804", "0.5155585", "0.5107157", "0.50866544", "0.50679135", "0.5035721", "0.50326467", "0.5029078", "0.50207394", "0.4985602", "0.49575225", "0.4956578", "0.4938984", "0.49286735", "0.490...
0.7983181
0
Computes counts for ALL compatible predictions for the given entry, taking missing values into account. counts is assumed to be a defaultdict(int) instance
def predict_all(self,entry,counts): if self.type == 'v': counts[self.value] += 1 return v = entry[self.feature] if v is None: for val,c in self.children.iteritems(): c.predict_all(entry,counts) return if self.type == 's': c = None try: c = self.children[v] except KeyError: #print "Unseen value for feature",self.feature,": ",v best = None bestDist = float('inf') for (val,c) in self.children.iteritems(): if abs(val - v) < bestDist: bestDist = abs(val - v) best = c c = best c.predict_all(entry,counts) elif self.type == 'i': if v <= self.value: self.children[0].predict_all(entry,counts) else: self.children[1].predict_all(entry,counts) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def none_count(d):\n return six.moves.reduce(lambda x, y: x + 1 if y == None else x, d.values(), 0)", "def _collect_counts(self, instance_list):\n \"\"\" Based on each instance, I augment empirical counts for every word and its BIO label in feature_count_table and for every transition from previous lab...
[ "0.627431", "0.6079571", "0.5920963", "0.5891744", "0.5813785", "0.5808457", "0.57633555", "0.5717828", "0.5693999", "0.5685904", "0.5680773", "0.5613226", "0.55953914", "0.5592989", "0.5554436", "0.5554436", "0.5554436", "0.5552211", "0.5529736", "0.55194706", "0.55182385", ...
0.5712293
8
Predicts a label for a feature vector
def predict(self,entry): assert self.root is not None,"Decision tree is not initialized" return self.root.predict(entry)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, features):\n vec = vectorize(features, self.vocab,\n self.dpvocab, self.projmat)\n label = self.clf.predict(vec)\n # print label\n return self.labelmap[label[0]]", "def predict_label(self, src): # real signature unknown; restored from __doc__\n...
[ "0.7951119", "0.77826697", "0.745595", "0.745595", "0.745595", "0.7453583", "0.7453583", "0.74425894", "0.74354345", "0.73819107", "0.73819107", "0.73819107", "0.73376524", "0.7261632", "0.72183657", "0.721681", "0.7200597", "0.7177909", "0.71714", "0.71003073", "0.709932", ...
0.0
-1
Learns from a Database instance. Each entry is given a label.
def learn(self,db,labels): self.keys = db.keys[:] labelindex = -1 if isinstance(labels,str): labelindex = db.keys.index(labels) assert labelindex >= 0,"label does not exist in database keys" labels = db.get_column(labelindex) elif isinstance(labels,int): labelindex = labels labels = db.get_column(labelindex) else: assert len(labels) == len(db.entries) self.root = DecisionTreeNode() if labelindex >= 0: raise NotImplementedError("Ooops, taking out indexed label broken") entries = np.delete(entries,labelindex,1) db = IndexedDatabase(db) if self.maxnodes != None: return self.greedy_learn_search(db,labels) else: self.deepest = 0 return self.greedy_learn(self.root,db,labels,range(len(labels)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_data_to_db(labelled):\n add_query = sqlite3.connect(DB_PATH).cursor()\n add_query.execute(\n \"CREATE TABLE IF NOT EXISTS labels(text TEXT, label TEXT, score FLOAT)\")\n for entry in labelled:\n add_query.execute(\"\"\"INSERT INTO labels(text,label,score) VALUES(?,?,?)\"\"\",\n ...
[ "0.607027", "0.5603955", "0.52142614", "0.51333624", "0.51184493", "0.5110511", "0.50444543", "0.50385123", "0.50106204", "0.50006604", "0.49866673", "0.49164563", "0.48594052", "0.48593736", "0.48549092", "0.48429736", "0.4827033", "0.48192468", "0.4811291", "0.48048383", "0...
0.6163972
0
Returns the maximum depth of the tree
def depth(self): return max(n.depth for n in self.iternodes())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_tree_depth(self):\n\n depths = np.array([leaf.tree_depth for leaf in self.leaves])\n\n return depths.max()", "def _max_depth(self):\n max_depth = 0\n for node, data in self.traverse():\n max_depth = max(max_depth, data['level'])\n return max_depth", "def ma...
[ "0.88220024", "0.8809473", "0.8737049", "0.8678075", "0.8640208", "0.84385777", "0.84369", "0.82397825", "0.8233229", "0.8227665", "0.82105106", "0.8154944", "0.8108934", "0.80783707", "0.8075165", "0.7958314", "0.7903902", "0.7895258", "0.78915644", "0.7854622", "0.7788676",...
0.8212468
10
Returns the total number of nodes in the tree
def numNodes(self): res = 0 for n in self.iternodes(): res += 1 return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(self):\n return self.__tree.node_count", "def count_nodes(self):\n if self.children is None:\n return 0\n\n total_count = 0\n for child in self.children:\n if child is None:\n return 0\n child_count = child.count_nodes()\n ...
[ "0.85499597", "0.8500708", "0.84604555", "0.84518975", "0.8407748", "0.83981884", "0.8378013", "0.832748", "0.83060443", "0.826005", "0.81602746", "0.81444424", "0.81200975", "0.808973", "0.80409694", "0.8024372", "0.8013639", "0.800769", "0.800769", "0.79400194", "0.7878856"...
0.83307356
7
Returns all nodes in the tree
def nodes(self): return [n for n in self.iternodes()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_nodes(self):\n return self._get_all_nodes()", "def get_nodes(self):\n all_nodes = [] \n if not self._root is None:\n all_nodes.append(self._root)\n i = 0\n while i < len(all_nodes):\n for node in all_nodes[i]._children:\n ...
[ "0.8067358", "0.805089", "0.80140465", "0.7930633", "0.7844008", "0.7699735", "0.766375", "0.7651623", "0.7554823", "0.74022615", "0.73905694", "0.73905694", "0.72834", "0.7274381", "0.7274381", "0.7274381", "0.72736555", "0.7273303", "0.7249022", "0.7241052", "0.7209985", ...
0.7894537
4
Pretty prints the tree
def pprint(self,indent=0,node=None): if node == None: node = self.root if node == None: print_indent(indent) print "[empty tree]" return if node.type == 'v': print_indent(indent) print node.value elif node.type == 's': for (val,c) in node.children.iteritems(): print_indent(indent) print "-",self.keys[node.feature],"=",val,":" self.pprint(indent+1,c) elif node.type == 'i': print_indent(indent) print self.keys[node.feature],"<=",node.value,":" self.pprint(indent+1,node.children[0]) print_indent(indent) print self.keys[node.feature],">",node.value,":" self.pprint(indent+1,node.children[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_tree(self):\n\t\tprint(self.__print_tree('', True, ''))", "def pprint(tree):\n p = PrettyPrinter(indent=2)\n p.pprint(tree)", "def print_tree(self):\n return \"\"", "def printTree(self):\n print(printTreeF(self, 0, self))", "def pretty_print(self):\n return self...
[ "0.85880834", "0.8403361", "0.81731063", "0.81423867", "0.813626", "0.8090559", "0.8077962", "0.80575883", "0.8041075", "0.7977933", "0.79050046", "0.7841601", "0.78335243", "0.7790531", "0.77501893", "0.77443475", "0.7728508", "0.7723257", "0.7723172", "0.7723172", "0.771828...
0.7580208
24
Can overload this to choose different features
def feature_subset(self,node,db,labels,ids): return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feature():\n pass", "def support(self):", "def feat():\n pass", "def feature(self):\n Feature(run=default_frame, flags=TE)\n Feature(run=load(\"window_functions.tests.rows_frame\", \"feature\"), flags=TE)\n Feature(run=load(\"window_functions.tests.range_frame\", \"feature\"), flags=TE)\n ...
[ "0.72120637", "0.6650957", "0.6531752", "0.63173395", "0.630279", "0.62784165", "0.62421846", "0.62280315", "0.6188825", "0.6120786", "0.61044574", "0.6051126", "0.5929795", "0.5882283", "0.5840704", "0.57487196", "0.5733037", "0.5721487", "0.57165647", "0.56840765", "0.56801...
0.0
-1
Given a indexed database, greedily and recursively learns the split value for the subtree of the indicated node. Return value is the number of mistakes made by the decision tree. Missing values are handled properly as indicating a 'don't care' value that gets passed down to both sides of the tree.
def greedy_learn(self,node,db,labels,ids):
    """Greedily and recursively learn the split for the subtree rooted at
    ``node`` from the indexed database ``db``.

    Returns the number of mistakes made by the resulting subtree on the
    examples in ``ids``. Missing values (None) are treated as "don't care"
    and are passed down to every branch of a split.
    """
    if node.depth >= self.maxdepth or len(ids) <= self.minexamples:
        #terminate recursion
        node.pick_best_label(db,labels,ids)
        err = misclassification_error([labels[id] for id in ids])
        if err > 0:
            print "Reached a leaf and had to make some sacrifices, cost",err
            print " depth",node.depth
            print " labels",[labels[id] for id in ids]
        return err

    # choose candidate features (None means "all features")
    features = self.feature_subset(node,db,labels,ids)
    cost = node.pick_best_split(db,labels,ids,features)

    #do a split
    if node.type == 'v':
        #base case: no misclassifications
        """
        if cost>0:
            print "greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero"
            print "cost=",cost,"misclassification=",misclassification_error([labels[id] for id in ids])
            print "# of ids:",len(ids)
            for i in ids:
                print "id",i,",",
                for k in range(db.numFeatures()):
                    if db[k,i] != None:
                        print k,"=",db[k,i],",",
                print "label",labels[i]
            raw_input()
        """
        return 0
    elif node.type == 's':
        #print "Picked feature",node.feature,"split"
        #do a discrete split
        node.children = dict()
        #select sub-indices
        Eids = defaultdict(list)
        noneids = []
        for id in ids:
            v = db[node.feature,id]
            if v is None:
                #item doesn't exist, it's a missing value
                noneids.append(id)
            else:
                Eids[v].append(id)
        #print " split sizes:",[len(x) for x in Eids.values()]
        #print " None ids:",len(noneids)
        # free the parent's id list before recursing to limit peak memory
        ids = None
        errors = 0
        for v,vids in Eids.iteritems():
            #recurse; missing-value ids are passed to every branch
            c = DecisionTreeNode(node)
            #print "Recursing on value",v
            #print " ids:",vids
            errors += self.greedy_learn(c,db,labels,vids+noneids)
            node.children[v] = c
            if c.depth > self.deepest:
                self.deepest = c.depth
                print "Decision tree learner: Reached node with depth",self.deepest
        return errors
    else:
        #do an inequality split
        assert node.type == 'i'
        #print "Picked feature",node.feature,"inequality value",node.value,"cost",cost
        leftids = []
        rightids = []
        for id in ids:
            if db[node.feature,id] is not None:
                if db[node.feature,id] <= node.value:
                    leftids.append(id)
                else:
                    rightids.append(id)
            else:
                # missing value: send the example down both branches
                leftids.append(id)
                rightids.append(id)
        if len(rightids) == len(ids) or len(leftids) == len(ids):
            #due to missing values, this split is useless
            errors = misclassification_error([labels[id] for id in ids])
            print "useless split on feature",node.feature,"value",node.value,"misclassification error",errors
            print "Left size",len(leftids),"right size",len(rightids)
            raw_input()
            node.pick_best_label(db,labels,ids)
            return errors
        #clear memory associated with ids list
        del ids[:]
        ids = None
        #print "Left size",len(leftids),"right size",len(rightids)
        c1 = DecisionTreeNode(node)
        c2 = DecisionTreeNode(node)
        #left side
        errors = self.greedy_learn(c1,db,labels,leftids)
        #right side
        errors += self.greedy_learn(c2,db,labels,rightids)
        #restore index
        node.children = {0:c1,1:c2}
        if c1.depth > self.deepest:
            self.deepest = c1.depth
            print "Decision tree learner: Reached node with depth",self.deepest
        return errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_split( df, attribute, split ):\n mask = df[attribute] <= split\n \n # split the dataset on the split attribute\n dfl = df[mask]\n dfr = df[~mask]\n \n \n # calculate weighting factors for child\n weighting_factor_left = float(dfl.shape[0])/df.shape[0]\n weighting_factor_ri...
[ "0.55779785", "0.5408219", "0.5375125", "0.52919394", "0.5285259", "0.5265397", "0.523007", "0.5206214", "0.5131763", "0.5119118", "0.5102969", "0.50822806", "0.50254524", "0.50055486", "0.5000578", "0.498862", "0.49706918", "0.4968559", "0.49658915", "0.49245366", "0.4886081...
0.5963019
0
Identifies the list of example indices that would follow the decision tree to node.
def identify_examples(self, db, labels, node):
    """Return the list of example indices in ``db`` that the decision tree
    would route from the root down to ``node``.

    Missing feature values (None) are treated as "don't care": an example
    with a missing value at a split always remains valid for that split.

    db     -- the indexed feature database, read as db[feature, id]
    labels -- per-example labels; only its length is used here
    node   -- the target tree node (node.parent is a weakref-style callable,
              or None at the root)
    """
    # Reconstruct the root-to-node path as (parent, child-key) pairs.
    path = []
    while node.parent != None:
        nkey = None
        for (k, c) in node.parent().children.items():
            if c is node:
                nkey = k
                break
        assert nkey != None
        path.append((node.parent(), nkey))
        node = node.parent()
    path = path[::-1]
    nids = len(labels)
    ids = []
    for id in range(nids):
        valid = True
        for n, ckey in path:
            f = n.feature
            # BUG FIX: the original read from an undefined global
            # ``featureMatrix``; the feature value comes from ``db``
            # (which was otherwise unused).
            val = db[f, id]
            if val is None:
                # missing value: passes every split, keep going
                continue
            if n.type == 'i':
                # inequality split: key 0 is the <= branch, 1 the > branch
                key = (0 if val <= n.value else 1)
            else:
                # discrete split: the child key is the feature value itself
                key = val
            if key != ckey:
                valid = False
                break
        if valid:
            ids.append(id)
    return ids
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def target_nodes_indexes(self) -> _TargetNodes:\n return self.__target_nodes_indexes", "def reference_nodes_idx(self) -> Dict[str, torch.Tensor]:\n return self.node_idx_references", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1...
[ "0.65589315", "0.63866156", "0.6244448", "0.6186481", "0.60354745", "0.5978617", "0.5964173", "0.5956777", "0.589274", "0.5866264", "0.58425367", "0.58398837", "0.58153033", "0.58101755", "0.58045644", "0.57759035", "0.57590544", "0.57380056", "0.57340384", "0.5718878", "0.57...
0.68075645
0
Same as greedy learn, but with a maximum number of nodes. Rather than a DFS, this uses a priority queue that at each step splits the node with the maximum improvement in misclassification error. At most maxnodes are in the resulting tree, and the depth is limited to maxdepth. Returns the total number of misclassifications of the training set. There is a lowmemory mode when self.lowmem == True or self.lowmem == 'auto' and the number of saved ids at a node grows beyond a certain number (self.lowmem_threshold, 10m by default). In lowmemory mode, the subset of of examples at a given node is determined dynamically, which incurs a O(|D|d) cost per node, where d is the depth of the node. Overall this raises running time by a factor of approximately O(|D| log_2 |D|).
def greedy_learn_search(self,db,labels):
    """Learn the tree with a node budget instead of depth-first recursion.

    A priority queue repeatedly splits the pending node with the largest
    misclassification error. At most self.maxnodes nodes are created and
    depth is limited to self.maxdepth. Returns the total number of
    misclassifications on the training set.

    Low-memory mode (self.lowmem == True, or 'auto' once the number of
    stored id lists exceeds self.lowmem_threshold) stops storing per-node
    example-id lists; they are recomputed on demand via identify_examples,
    trading roughly O(|D| log |D|) extra time for memory.
    """
    queue = PriorityQueue()
    dolowmem = (self.lowmem == True)
    numidsets = 0
    root_ids = range(len(labels))
    queue.push((self.root,root_ids),len(labels))
    numnodes = 1
    deepest = 0
    err = 0
    while len(queue) > 0 and numnodes+2 <= self.maxnodes:
        #print "%d nodes, priority %d"%(numnodes,queue.nextkey())
        nerr = queue.nextkey()
        (node,trainingset) = queue.pop()
        #print "Greedy learn",len(trainingset)
        if trainingset is None:
            # low-memory mode: recompute this node's example set on demand
            trainingset = self.identify_examples(db,labels,node)
        if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:
            #print " Hit depth or training set limit"
            node.pick_best_label(db,labels,trainingset)
            err += misclassification_error([labels[id] for id in trainingset])
            continue
        features = self.feature_subset(node,db,labels,trainingset)
        cost = node.pick_best_split(db,labels,trainingset,features)
        numidsets -= len(trainingset)
        #do a split
        if node.type == 'v':
            # became a leaf; nothing more to do for this node
            continue
        elif node.type == 's':
            #discrete split
            node.children = dict()
            #select sub-indices
            Eids = defaultdict(list)
            noneids = []
            for id in trainingset:
                v = db[node.feature,id]
                if v is None:
                    #item doesn't exist, it's a missing value
                    noneids.append(id)
                else:
                    Eids[v].append(id)
            #determine whether to switch to low-memory mode
            if not dolowmem and self.lowmem=='auto':
                for v,vids in Eids.iteritems():
                    numidsets += len(vids)+len(noneids)
                if numidsets > self.lowmem_threshold:
                    print "Decision tree learner switching to low-memory mode"
                    dolowmem = True
            # the child id lists have been extracted; drop the parent's list
            trainingset = None
            numnodes += len(Eids)
            #print "Split sizes",[len(v) for v in Eids.itervalues()]
            #print "None size",len(noneids)
            for v,vids in Eids.iteritems():
                #print "->",len(vids),"+",len(noneids)
                #recurse; missing-value ids follow every branch
                c = DecisionTreeNode(node)
                node.children[v] = c
                err = misclassification_error([labels[id] for id in vids+noneids])
                cids = (None if dolowmem else vids+noneids)
                queue.push((c,cids),err)
                if c.depth > deepest:
                    deepest = c.depth
                    print "Decision tree learner: Reached node with depth",deepest
        else:
            #do an inequality split
            assert node.type == 'i',"Got a weird type? "+str(node.type)
            leftids = []
            rightids = []
            for id in trainingset:
                val = db[node.feature,id]
                if val is not None:
                    if val <= node.value:
                        leftids.append(id)
                    else:
                        rightids.append(id)
                else:
                    # missing value: send the example down both branches
                    leftids.append(id)
                    rightids.append(id)
            if len(leftids)==0 or len(rightids)==0:
                print "node feature "+str(node.feature)+" doesn't have a valid split value "+str(node.value)
                vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]
                print "min,max of training set:",min(vals),max(vals)
                print "cost is",cost
                raw_input()
            assert len(leftids) > 0 and len(rightids) > 0
            if not dolowmem and self.lowmem=='auto':
                numidsets += len(leftids) + len(rightids)
                if numidsets > self.lowmem_threshold:
                    print "Decision tree learner switching to low-memory mode"
                    dolowmem = True
            # the child id lists have been extracted; drop the parent's list
            trainingset = None
            numnodes += 2
            c1 = DecisionTreeNode(node)
            c2 = DecisionTreeNode(node)
            node.children = {0:c1,1:c2}
            #print "->",len(leftids)
            #print "->",len(rightids)
            err1 = misclassification_error([labels[id] for id in leftids])
            err2 = misclassification_error([labels[id] for id in rightids])
            if dolowmem:
                leftids = None
                rightids = None
            queue.push((c1,leftids),err1)
            queue.push((c2,rightids),err2)
            if c1.depth > deepest:
                deepest = c1.depth
                print "Decision tree learner: Reached node with depth",deepest
    #end of recursion. for the rest of the nodes still in the queue, make them leaf nodes
    if len(queue) > 0:
        print "%d nodes remaining in queue, setting to leaves"%(len(queue),)
        for (node,trainingset) in queue:
            node.pick_best_label(db,labels,trainingset)
            err += misclassification_error([labels[id] for id in trainingset])
    return err
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def node_count_max(self) -> int:\n return int(self.graph_tuple_stats.node_count_max or 0)", "def max_node_count(self) -> int:\n return pulumi.get(self, \"max_node_count\")", "def max_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_node_count\")", "def max_nod...
[ "0.6156422", "0.6155967", "0.59827256", "0.5911448", "0.5853999", "0.583166", "0.5827011", "0.5772355", "0.5760408", "0.570417", "0.5690565", "0.5642428", "0.55889016", "0.556948", "0.5526726", "0.5512847", "0.5484053", "0.54723763", "0.54671174", "0.5442169", "0.54306436", ...
0.62972176
0
Initializes the list. If entries is given, this initializes the entries of the list. If memoized = True, any lazy evaluated entries are saved after their first evaluation.
def __init__(self, entries=None, memoized=False):
    """Initialize the list.

    entries  -- optional initial entries; a shallow copy is stored so the
                caller's sequence is never aliased.
    memoized -- when True, lazily evaluated entries are cached after their
                first evaluation.
    """
    if entries is None:
        self.entries = []
    else:
        self.entries = entries[:]
    self.memoized = memoized
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, contents=()):\n self._data = [self._Item(k, v) for k,v in contents] # empty by default\n if len(self._data) > 1:\n self._heapify()", "def init_all_entries(self) -> bool:\n raise NotImplementedError", "def __init__(self, owner, entries=None):\n\n self.o...
[ "0.5535386", "0.54829353", "0.5454826", "0.53897613", "0.5307603", "0.5187028", "0.5180785", "0.514409", "0.50772923", "0.5034808", "0.5021943", "0.5018495", "0.49956906", "0.49621394", "0.49150985", "0.49012715", "0.4892579", "0.48834223", "0.48831782", "0.48757192", "0.4859...
0.7950624
0
Given a training and testing dataset, builds a decision tree and tests it
def test_decision_tree(train,test,maxnodes=None):
    """Build a decision tree on ``train`` and evaluate it on ``test``.

    train, test -- datasets whose .entries rows end with a boolean label
    maxnodes    -- optional node budget passed to the learner

    Prints the training error, tree statistics, any misclassified training
    examples, and a confusion-matrix summary on the test set.
    """
    tree = DecisionTree()
    tree.maxnodes = maxnodes
    errors = tree.learn(train,'label')
    print "Decision tree makes",errors,"errors"
    print "Depth",tree.depth(),"nodes",tree.numNodes()
    if tree.numNodes() < 100:
        # only dump small trees; large ones would flood the console
        tree.pprint()
    if errors > 0:
        print "Training errors:"
        for id,e in enumerate(train.entries):
            res = tree.predict(e[:-1])
            if res != e[-1]:
                if len(e[:-1]) > 10:
                    # wide feature vectors: print the row index instead
                    print " Error on",id,"prediction",res
                else:
                    print " Error on",e[:-1],"prediction",res
    print "Testing error:"
    # confusion-matrix counts: true/false positives and negatives
    tp,tn,fp,fn = 0,0,0,0
    for e in test.entries:
        res = tree.predict(e[:-1])
        if res and e[-1]:
            tp += 1
        elif res and not e[-1]:
            fp += 1
        elif not res and e[-1]:
            fn += 1
        else:
            tn += 1
    Ntest = len(test.entries)
    print "True +: %g, True -: %g"%(float(tp)/Ntest,float(tn)/Ntest)
    print "False -: %g, False +: %g"%(float(fn)/Ntest,float(fp)/Ntest)
    print "Overall error: %g"%(float(fn+fp)/Ntest,)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_and_evaluate_decision_tree(X_train, y_train, X_test, y_test):\n model = DecisionTreeClassifier(criterion='entropy')\n model.fit(X_train, y_train)\n y_pred = model.predict(X_train)\n y_heldPred = model.predict(X_test)\n acc_train = accuracy_score(y_train, y_pred)\n acc_heldOut = accuracy...
[ "0.71724963", "0.6940502", "0.6876765", "0.6875179", "0.68419737", "0.68007624", "0.671924", "0.6693489", "0.6642988", "0.6619835", "0.657808", "0.6437057", "0.6418784", "0.63988245", "0.6392599", "0.63923985", "0.6373904", "0.63466936", "0.6346446", "0.6345242", "0.63415146"...
0.6959578
1
Test the template tag js_settings
def test_js_settings(mocker, rf):
    """Test that the js_settings template tag renders SETTINGS as a JS global."""
    # Stub out the settings collection so the tag output is deterministic.
    mocker.patch(
        "mitxpro.templatetags.js_interop.get_js_settings",
        return_value={"data": "value"},
    )
    ctx = Context({"request": rf.get("/")})
    tpl = Template("{% load js_interop %}{% js_settings %}")
    expected = (
        '<script type="text/javascript">\n'
        'var SETTINGS = {"data": "value"};\n'
        "</script>"
    )
    assert tpl.render(ctx) == expected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jssettings(self):\n self.update()\n return \"var %s = %s\" % (self.js_var_settings_name,\n json.dumps(self.settings))", "def test_jssettings(self):\n settings_fullpath = os.path.join(dirs.get_main_js_dir(), \"mediabrute-settings.js\")\n \n i...
[ "0.6923433", "0.6532566", "0.6531576", "0.6457588", "0.6457588", "0.6105654", "0.6031209", "0.60102826", "0.5945879", "0.5761625", "0.57324207", "0.5658938", "0.5633526", "0.5626261", "0.5626261", "0.55889726", "0.5578439", "0.5576125", "0.5567976", "0.5524449", "0.54723006",...
0.8117115
0
Uses keypoint algorithm SIFT to extract feature points from the image and get point correspondences
def getFeatureMatches(img1, img2):
    """Extract SIFT keypoints from both images and return the matched point
    correspondences as a (left, right) pair of Nx2 arrays."""
    detector = xfeatures2d.SIFT_create()
    kp1, des1 = detector.detectAndCompute(img1, None)
    kp2, des2 = detector.detectAndCompute(img2, None)

    # FLANN matcher with a 5-tree KD-tree index; k=2 gives us the best and
    # second-best match for the ratio test below.
    FLANN_INDEX_KDTREE = 0
    matcher = cv2.FlannBasedMatcher(
        dict(algorithm=FLANN_INDEX_KDTREE, trees=5),
        dict(checks=50),
    )
    matches = matcher.knnMatch(des1, des2, k=2)

    # Lowe's ratio criterion: keep a match only when it is clearly better
    # than the runner-up.
    accepted = [m for (m, n) in matches if m.distance < 0.5 * n.distance]
    left = np.array([kp1[m.queryIdx].pt for m in accepted])
    right = np.array([kp2[m.trainIdx].pt for m in accepted])
    return (left, right)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SIFT_features(self):\n start_time = datetime.datetime.now() \n self.func_log(\"\\n\\tIn SIFT_features()\")\n \n key_points = {}\n descriptor_list = []\n SIFT = cv2.xfeatures2d.SIFT_create()\n \n self.func_log(\"\\t\\tSIFT feature extrac...
[ "0.75240153", "0.7496029", "0.7456625", "0.73814374", "0.7321232", "0.7184007", "0.7014539", "0.6925322", "0.68039143", "0.6791409", "0.6788328", "0.6726114", "0.66914046", "0.66845495", "0.66036594", "0.6586292", "0.6563133", "0.6560529", "0.6559361", "0.65400463", "0.644459...
0.67450947
11
This function computes the fundamental matrix by computing the SVD of Ax = 0 ; 8point algorithm
def computeFundamentalMatrix(pts1, pts2):
    """Estimate the fundamental matrix with the eight-point algorithm.

    Builds the linear system A f = 0 from the correspondences, takes the
    SVD null vector as F, then enforces the rank-2 constraint by zeroing
    the smallest singular value.

    pts1, pts2 -- sequences of N >= 8 corresponding (x, y) image points
    Returns a 3x3 numpy array F of rank 2.
    """
    n = len(pts1)
    A = np.empty((n, 9))
    # BUG FIX: the original iterated range(len(pts1) - 1), skipping the last
    # correspondence and leaving the final row of the np.empty matrix
    # uninitialized (garbage), which made the result nondeterministic.
    for i in range(n):
        x1, y1 = pts1[i][0], pts1[i][1]
        x2, y2 = pts2[i][0], pts2[i][1]
        A[i] = np.array([x1 * x2, x2 * y1, x2, y2 * x1, y2 * y1, y2, x1, y1, 1])
    # The null vector of A (last row of V^T) is the stacked F matrix.
    U, S, V = np.linalg.svd(A)
    F = V[-1].reshape(3, 3)
    # Constrain F to rank 2 by zeroing its smallest singular value.
    U1, S1, V1 = np.linalg.svd(F)
    S2 = np.array([[S1[0], 0, 0], [0, S1[1], 0], [0, 0, 0]])
    F = np.dot(np.dot(U1, S2), V1)
    return F
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svd0(A):\n M,N = A.shape\n if M>N: return sla.svd(A, full_matrices=True)\n else: return sla.svd(A, full_matrices=False)", "def invert_L1_svd():", "def visualize_svd():", "def svd(self):\n U, s, Vh = la.svd(self)\n S = np.zeros(self.shape)\n np.fill_diagonal(S, s)\n return...
[ "0.66048837", "0.6466162", "0.6259937", "0.6250825", "0.62505597", "0.62274474", "0.6104567", "0.6089218", "0.6025379", "0.5982765", "0.597328", "0.590215", "0.58907986", "0.58582675", "0.58575904", "0.584388", "0.58408606", "0.58376825", "0.581499", "0.58008623", "0.5792866"...
0.68444854
0
Leverages the eight-point algorithm and implements RANSAC to find the inliers and the best fundamental matrix
def getInlierRANSAC(pts1, pts2):
    """Robustly estimate the fundamental matrix with RANSAC over the
    eight-point algorithm.

    pts1, pts2 -- Nx2 arrays of corresponding image points
    Returns (F, inlier_pts1, inlier_pts2) for the hypothesis with the most
    inliers under the epipolar-constraint threshold.
    """
    iterations = 50
    threshold = 0.01
    max_count = 0
    n = len(pts1)
    finalFundamentalMatrix = np.zeros((3, 3))
    # BUG FIX: initialize the inlier outputs so the function cannot raise
    # UnboundLocalError when no hypothesis ever yields a single inlier.
    final_inlier_Left = np.empty((0, 2))
    final_inlier_Right = np.empty((0, 2))
    for i in range(iterations):
        count = 0
        # Minimal sample of 8 correspondences for the eight-point algorithm.
        idx = random.sample(range(n - 1), 8)
        left_pts = pts1[idx]
        right_pts = pts2[idx]
        F = computeFundamentalMatrix(left_pts, right_pts)
        left_feature_inlier = []
        right_feature_inlier = []
        # Score the hypothesis: |x2^T F x1| should be ~0 for true inliers.
        for j in range(0, n):
            homogeneous_right = np.array([pts2[j, 0], pts2[j, 1], 1])
            homogeneous_left = np.array([pts1[j, 0], pts1[j, 1], 1])
            fit = np.dot(homogeneous_right.T, np.dot(F, homogeneous_left))
            if np.abs(fit) < threshold:
                left_feature_inlier.append(pts1[j])
                right_feature_inlier.append(pts2[j])
                count = count + 1
        if count > max_count:
            # best hypothesis so far: remember it and its inlier set
            max_count = count
            finalFundamentalMatrix = F
            final_inlier_Left = np.array(left_feature_inlier)
            final_inlier_Right = np.array(right_feature_inlier)
    return finalFundamentalMatrix, final_inlier_Left, final_inlier_Right
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ransac(data, hypothesis, metric, sample_size, num_iter, inlier_thresh):\n N,d = data.shape\n best_frac, best_hypothesis, best_mask = 0, None, None\n for i in range(num_iter):\n js = np.random.choice(N,size=sample_size,replace=False)\n hypothesis_elements = data[js,:]\n H = hypothe...
[ "0.6203597", "0.5916464", "0.5894118", "0.5867515", "0.5715989", "0.56956524", "0.56905115", "0.5686345", "0.56403846", "0.55984086", "0.5590803", "0.5577823", "0.5559308", "0.5542196", "0.5525735", "0.55202436", "0.55189615", "0.55174667", "0.5481329", "0.5479639", "0.546803...
0.699575
0
This function computes the essential matrix from the fundamental matrix. The E matrix is defined in normalized image coordinates
def getEssentialMatrix(K, F):
    """Derive the essential matrix from the fundamental matrix.

    Forms E = K^T F K (normalized image coordinates), then projects it onto
    the space of valid essential matrices by replacing its singular values
    with the ideal (1, 1, 0) spectrum.
    """
    E = np.dot(K.T, np.dot(F, K))
    u, s, v = np.linalg.svd(E)
    # Force the singular values to (1, 1, 0), as a true essential matrix
    # must have two equal singular values and one zero.
    ideal_spectrum = np.diag([1.0, 1.0, 0.0])
    return np.dot(u, np.dot(ideal_spectrum, v))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n w,v=np.linalg.eig(matrix)\n ### END YOUR CODE\n return w, v", "def P(self):\n self.eigenmatrix()", "def Euler2Mat(e):\n x=e[0]\n y=e[1]\n z=e[2]\n s1=np.sin(x)\n s2=np.sin(y)\n s3=np.sin(z)\n c1...
[ "0.60159427", "0.6003989", "0.5936057", "0.5899959", "0.58792543", "0.58560514", "0.5814833", "0.58042705", "0.57690716", "0.5768329", "0.57617337", "0.57117414", "0.5709993", "0.5708076", "0.5683127", "0.56726044", "0.56690747", "0.5665278", "0.5657497", "0.56303555", "0.561...
0.6334938
0
Given the essential matrix, we derive the camera position and orientation
def ExtractCameraPose(E):
    """Decompose the essential matrix into the four candidate camera poses.

    Returns (cam_center, cam_rotation) with shapes (4, 3, 1) and (4, 3, 3).
    Translation candidates are +/- the third column of U; rotations are
    U W V^T and U W^T V^T. Any candidate whose rotation has a negative
    determinant is negated wholesale so every returned R satisfies det = +1.
    """
    u, s, v = np.linalg.svd(E, full_matrices=True)
    w = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]]).reshape(3, 3)

    # Four (C, R) hypotheses in the same order as the original code.
    centers = [
        u[:, 2].reshape(3, 1),
        -u[:, 2].reshape(3, 1),
        u[:, 2].reshape(3, 1),
        -u[:, 2].reshape(3, 1),
    ]
    rotations = [
        np.dot(np.dot(u, w), v).reshape(3, 3),
        np.dot(np.dot(u, w), v).reshape(3, 3),
        np.dot(np.dot(u, w.T), v).reshape(3, 3),
        np.dot(np.dot(u, w.T), v).reshape(3, 3),
    ]
    # A valid rotation must have determinant +1; flip the whole pose if not.
    for k in range(4):
        if np.linalg.det(rotations[k]) < 0:
            centers[k] = -centers[k]
            rotations[k] = -rotations[k]

    cam_center = np.array(centers)
    cam_rotation = np.array(rotations)
    return cam_center, cam_rotation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_camera_orientation(self):\n\n # Create the vector from the camera to the robot\n vector_x = self.robot_x - self.camera_x\n vector_y = self.robot_y - self.camera_y\n vector_z = self.robot_z - self.camera_z\n\n # Calculate yaw and pitch from this vector\n yaw = math....
[ "0.6524759", "0.64992476", "0.6400119", "0.62868214", "0.6233258", "0.62323457", "0.6178379", "0.6016908", "0.6012133", "0.60015947", "0.5997646", "0.59892434", "0.5961536", "0.5942139", "0.59395564", "0.5925271", "0.5909638", "0.58591086", "0.58126867", "0.581134", "0.580240...
0.7063537
0
This function returns the extrinsic parameter matrix
def getExtrinsicParameter(K, R, C):
    """Return the 3x4 camera projection matrix K [R | t] with t = -R C.

    K -- 3x3 intrinsic matrix
    R -- 3x3 rotation
    C -- 3x1 camera center in world coordinates
    """
    # Translation that moves the world origin into the camera frame.
    translation = np.dot(-R, C)
    pose = np.hstack((R.reshape(3, 3), translation))
    return np.dot(K, pose)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_intrinsic_mat(params):\n return np.asarray(\n [\n [params[0], 0.0, params[1]],\n [0.0, params[2], params[3]],\n [0.0, 0.0, 1.0],\n ]\n )", "def get_extrinsic_matrix(pose):\n batch_size, _ = pose.shape\n rot = pose[:,:3]\n trans = pose[:,3:]\n\...
[ "0.7331173", "0.63358027", "0.63124555", "0.6310796", "0.61443675", "0.61443675", "0.61398846", "0.6122645", "0.6090896", "0.59263265", "0.58786243", "0.5862724", "0.5847503", "0.58337194", "0.57778543", "0.5750955", "0.57484686", "0.57484686", "0.5668891", "0.5660093", "0.56...
0.6725677
1
Gets the translation vector and rotation matrix of the camera w.r.t the world frame and removes camera frame ambiguity
def getDisambiguousPose(K, C, R, left_features, right_features):
    """Resolve the four-fold pose ambiguity from the essential matrix.

    For each candidate (C[i], R[i]), triangulates every correspondence and
    counts the points that pass the cheirality check (positive depth in
    front of the camera); the candidate with the most such points wins.
    Returns (Translation, Rotation), with the translation negated when its
    z-component is negative.

    BUG FIX: Translation/Rotation are initialized to the first candidate so
    the function cannot raise UnboundLocalError when no candidate places a
    single point in front of the camera.
    """
    check = 0
    Translation, Rotation = C[0], R[0]
    for i in range(0, len(R)):
        count = 0
        extrinsic_params = getExtrinsicParameter(K, R[i], C[i])
        for j in range(0, len(left_features)):
            X = getTriangulationPoint(K, extrinsic_params, left_features[j], right_features[j])
            # Cheirality: the triangulated point must lie in front of the
            # candidate camera (r3 . (X - C) > 0) and have non-negative z.
            r3 = R[i][2, :].reshape((1, 3))
            cheiralityCondition = np.dot(r3, X[:3] - C[i])
            if cheiralityCondition > 0 and X[2] >= 0:
                count += 1
        if count > check:
            # best candidate so far
            check = count
            Translation = C[i]
            Rotation = R[i]
    if Translation[2] < 0:
        Translation = -Translation
    return Translation, Rotation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_camera_transform(self):\r\n if not self.pose:\r\n rospy.loginfo(\"no pose!\")\r\n return None\r\n if self.pose.header.frame_id != self.role_name:\r\n rospy.logwarn(\"Unsupported frame received. Supported {}, received {}\".format(\r\n self.role_n...
[ "0.653753", "0.6497488", "0.6299928", "0.62356544", "0.61046827", "0.60589045", "0.60228235", "0.5942271", "0.5935986", "0.5927937", "0.5889109", "0.58773947", "0.58653027", "0.5864766", "0.5847944", "0.58465064", "0.5835871", "0.5828304", "0.57876843", "0.5779836", "0.577296...
0.0
-1
return enantiomer of self, either D or L
def D_or_L(self) -> str:
    """Return the enantiomer of this residue, either 'D' or 'L'.

    Builds the plane through the N, C and CA atoms, and classifies the
    residue by which side of that plane (along the N-C x CA-C normal) the
    CB atom lies on: in front => 'D', behind => 'L'.
    """
    CO = np.array([self['C'].xyz.x, self['C'].xyz.y, self['C'].xyz.z])
    CA = np.array([self['CA'].xyz.x, self['CA'].xyz.y, self['CA'].xyz.z])
    CB = np.array([self['CB'].xyz.x, self['CB'].xyz.y, self['CB'].xyz.z])
    N = np.array([self['N'].xyz.x, self['N'].xyz.y, self['N'].xyz.z])
    v1 = N - CO
    v2 = CA - CO
    # Normal of the N-C-CA plane; its sign fixes the "front" side.
    cp = np.cross(v1, v2)
    CB_infront = cp.dot(CB - CA) > 0
    # BUG FIX: removed a stray debug print(CB_infront) left in the original.
    return 'D' if CB_infront else 'L'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def L(self):\n if not self.isVaild():\n pass\n return self.Lq() + self.r()", "def exterior_der(self):\n from utilities import format_unop_txt, format_unop_latex\n if self._exterior_derivative is None:\n vmodule = self._vmodule # shortcut\n rname = form...
[ "0.58051234", "0.5680773", "0.5520372", "0.5517061", "0.5517061", "0.5517061", "0.5517061", "0.54927427", "0.53145987", "0.53131396", "0.53023165", "0.52391076", "0.51286054", "0.5116414", "0.5094388", "0.5080737", "0.50450814", "0.50200135", "0.4995237", "0.4990676", "0.4958...
0.5174654
12
removes all Hydrogen atoms from instance
def remove_hydrogens(self) -> None:
    """Remove all hydrogen atoms from this structure, in place.

    Walks chains -> residues -> atoms; every atom whose element is 'H' is
    removed from its residue, printing a note per removal.

    BUG FIX: the original called r.remove_atom(a) while still iterating
    over r, mutating the collection mid-iteration; the hydrogens are now
    collected first and removed afterwards.
    """
    for cid, c in self:
        for rid, r in c:
            # Gather first so removal never races the iteration.
            hydrogens = [(aid, a) for aid, a in r if a.element == 'H']
            for aid, a in hydrogens:
                print('removing H at %s' % aid)
                r.remove_atom(a)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self):\n del self.shx.atoms[self.index]", "def strip(self):\n types = [type(self.strip),\n type(self.values),\n type(self.__ne__),\n type(self.__class__)]\n\n for attr in dir(self):\n if not type(getattr(self, attr)) in ty...
[ "0.6937832", "0.6847128", "0.6714238", "0.6525774", "0.64063466", "0.63946915", "0.62644595", "0.6262052", "0.6121503", "0.60625106", "0.60625106", "0.6042291", "0.6015206", "0.5961813", "0.5960132", "0.592724", "0.5911329", "0.58740425", "0.5869668", "0.5853029", "0.5847566"...
0.7476278
0
collect a set of residues with memb_z within [-15, 15]
def memb_residues(pdb: "MyPDB") -> list:
    """Collect every residue in ``pdb`` that carries a membrane
    z-coordinate (memb_z is not None).

    BUG FIX: the return annotation was ``list()`` — an *empty list
    instance*, not a type — corrected to ``list``. (The parameter
    annotation is a string forward reference to the project's MyPDB.)
    """
    return [res
            for ch in pdb.chains.values()
            for res in ch.values()
            if res.memb_z is not None]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_subset_mz(self, mz):\n regions = self.boxes_mz.at(mz)\n it = BoxHolder()\n for r in regions:\n box = r.data\n it.add_box(box)\n return it", "def get_subset(mlist,year):\n newlist = []\n for entry in mlist:\n if int(entry[0][:4]) > int(year):\...
[ "0.5860528", "0.5294917", "0.5274569", "0.5238881", "0.51128125", "0.50799537", "0.50743914", "0.4973342", "0.49630117", "0.49471557", "0.49072868", "0.48891437", "0.48831913", "0.48573068", "0.48536083", "0.4842539", "0.4838259", "0.48302126", "0.48269477", "0.48252738", "0....
0.59874827
0