query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Fonction pour placer un petit marquer de couleur 'col' a la coordonnee (xa,ya)
def create_mark(xa,ya,col): disque = canvas.create_oval(xa-2,ya-2,xa+2,ya+2,fill=col,outline=col) return disque
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def em_coord_turtle(lin, col, dim, tam_celula):\n meio = dim // 2\n x = (col - meio) * tam_celula\n y = (meio - lin) * tam_celula\n return x, y", "def get_xy_position(row, col):\n spacing_x = 86 + 11\n spacing_y = 98 + 8\n top_y = 50\n left_x = 50\n return left_x + col * spacing_x, top...
[ "0.6763953", "0.67502594", "0.6700341", "0.6349989", "0.6342108", "0.62057173", "0.6079546", "0.6056177", "0.6021455", "0.6001277", "0.5997666", "0.5983044", "0.5976914", "0.5969578", "0.5954558", "0.59523946", "0.5932657", "0.5922999", "0.59033656", "0.5902169", "0.5873227",...
0.0
-1
Prend un couple de coordonnees (x,y) et retourne le couple (a,b) tel que le point represente par (x,y) soit dans la case de coin hautgauche de coordonnes (a,b)
def id_case(x,y): try: assert (100<x<1100 and 200<y<700) a=int((x-100)/cote)*cote+100 b=int((y-200)/cote)*cote+200 return (a,b) except AssertionError: print(x,y) print("Le couple a identifier n'est pas dans le rectangle")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _transform_point(self, x, y):\n return (x, y)", "def coord (i, j):\r\n return j, i", "def coordinates(self):", "def coordinate_from_points(pnta: Vector, pntb: Vector,\n pntc: Vector) -> Coordinate:\n pnt = pnta\n vecx = pntb - pnta\n vecxy = pntc - pnta\n r...
[ "0.64014715", "0.6380654", "0.62387294", "0.61907715", "0.61892194", "0.6171595", "0.6140475", "0.61289203", "0.6101151", "0.60843295", "0.60767776", "0.602639", "0.6021461", "0.6011172", "0.6002909", "0.5987593", "0.59744483", "0.5946929", "0.59399027", "0.5933556", "0.59327...
0.57720083
37
Fonction qui renvoie les disques qui le touchent directement Prend en argument un disque Retourn la liste des disques a son contact / liste vide si aucun
def C(v,securite): to_return = set() x,y = l[v][0],l[v][1] a,b = id_case(x,y) #on recupere la case ou se trouve le disque qu'on test voisinage = set(cases[a,b]) #on recupere la liste du voisinage (pas forcement contact) #4 #012 #345 #678 if a>100: voisinage = add_list(voisinage,cases[a-4*rayon,b]) #3 if b>200: voisinage = add_list(voisinage,cases[a-4*rayon,b-4*rayon]) #0 voisinage = add_list(voisinage,cases[a,b-4*rayon]) #1 if b<600: voisinage = add_list(voisinage,cases[a-4*rayon,b+4*rayon]) #6 voisinage = add_list(voisinage,cases[a,b+4*rayon]) #7 if a<1100-4*rayon: voisinage = add_list(voisinage,cases[a+4*rayon,b]) #5 if b>200: voisinage = add_list(voisinage,cases[a+4*rayon,b-4*rayon]) #2 voisinage = add_list(voisinage,cases[a,b-4*rayon]) #1 if b<600: voisinage = add_list(voisinage,cases[a+4*rayon,b+4*rayon]) #8 voisinage = add_list(voisinage,cases[a,b+4*rayon]) #7 #On ajoute plusieurs fois le meme a un ensemble -> pas grave for i in voisinage: xb,yb = l[i][0],l[i][1] if 0<sqrt((x-xb)**2+(y-yb)**2)<=2*rayon+securite: to_return.add(i) return to_return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receiveContactList(self, contactList):", "def controlList(user):\n insListe = Inserito.objects.filter(user=user, cancellato=False).select_related('listaAttesa')\n for l in insListe:\n numPosti = l.listaAttesa.corso.cap - l.listaAttesa.corso.posti_prenotati\n if numPosti > 0:\n ...
[ "0.50994366", "0.4944012", "0.4894424", "0.4877875", "0.4851604", "0.48309383", "0.48296744", "0.47853267", "0.47807354", "0.4748723", "0.47304198", "0.47265714", "0.46883455", "0.4688048", "0.46656272", "0.46631646", "0.465828", "0.4652967", "0.46512303", "0.4631358", "0.461...
0.0
-1
Fonction qui prend en argument un disque Qui renvoie tous les disques en son contact direct ou non (le contact du contact du contact ... est renvoye) et qui se trouve derriere v
def T(v,securite): to_return = {} #renvoie le dictionnaire {indice du contact (0 -> direct / sinon -> plus ou moins direct) : set({disque})} Cv = set(C(v,securite)) Tv = set(Cv) i=0 xv,yv=l[v][0],l[v][1] while Cv != set() and i<5: to_return[str(i)]=Cv new_Cv = set() for j in Cv: xj,yj=l[j][0],l[j][1] #si j est devant v, on ne le copte pas if sqrt((xj-xt)**2+(yj-yt)**2)<sqrt((xv-xt)**2+(yv-yt)**2): continue new_Cv= new_Cv.__or__(C(j,securite).__sub__(Tv.__or__(set(j).__or__({v})))) Tv = Tv.__or__(new_Cv) Cv = new_Cv i+=1 return to_return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def analyse_donnees(self, mere, foetus, pere, log):\n concordance_mf = 0\n concordance_pf = None\n if len(pere) != 0:\n concordance_pf = 0\n log = log + \"Père détecté.................................\\n\"\n log = log + \"\\n\\nVérification concordance des ADNs...
[ "0.59579074", "0.58650804", "0.56254125", "0.54683447", "0.54610604", "0.5308732", "0.5250842", "0.5229191", "0.5190432", "0.5136807", "0.50936365", "0.50733256", "0.5067113", "0.50111264", "0.49850872", "0.49645695", "0.49620542", "0.49505514", "0.49489334", "0.4907525", "0....
0.48496613
27
pour un indice de disque k, la famille des listes de des indices, et l'indice du disque k dans la famille L, renvoie la liste / l'ensemble des indices de disques dont la vitesse va etre modifiee par k
def influence(k,L,n): try: to_check = L[n-1] #set des indices contact_direct=C(k,0) return list(to_check.intersection(contact_direct)) except: return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_k_indices(self, ks):\n if self.staticneighs:\n idx_ks = ks\n else:\n idx_ks = [self.ks.index(e) for e in ks]\n return idx_ks", "def generate_L(data_set, k, min_support):\n fptree = FPtree.fptree(data_set, min_support)\n print(\"pre:\",datetime.datetime.no...
[ "0.5962983", "0.57616395", "0.57209855", "0.56889814", "0.567445", "0.5651179", "0.5565496", "0.55339205", "0.5526475", "0.5519671", "0.55177283", "0.55137473", "0.54557353", "0.54334563", "0.5427422", "0.54150856", "0.5399065", "0.5382154", "0.53808355", "0.5375292", "0.5372...
0.57878196
1
Pour un vecteur v=(vx,vy) renvoie le vecteur colineaire a v de norme n
def normal(vx,vy,n): if vx==0: if vy==0: return (0,0) else: return (0,n) elif vy==0: return (n,0) else: return (n/sqrt(1+(vy/vx)**2),n/sqrt(1+(vx/vy)**2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collpi2(Te,nev,v):\n return vcrit(Te)/2./v**3*collnu(Te,nev)", "def collnud(Te,nev,v):\n return vcrit(Te)/2./v**3*collnu(Te,nev)", "def f_v(_a, _vs, _Ps, _Ps0): # _aはスカラ, _vsはベクトル, _Ps, _Ps0は3行2列の行列\n center_pos = _Ps[0]\n center_pos_0 = _Ps0[0]\n idx_iter = Index_iterator(1, 8)\...
[ "0.65670574", "0.6445967", "0.6404751", "0.6170087", "0.6155207", "0.6150926", "0.6138606", "0.6094283", "0.6083068", "0.6052513", "0.6026877", "0.5970654", "0.5965773", "0.5965605", "0.59526074", "0.5930183", "0.5930033", "0.59086335", "0.5895722", "0.5877011", "0.5875526", ...
0.59364426
15
Add a new hotel to the system
async def add_hotel_endpoint(request): hotel_name = request.args["hotel_name"][0] hotel_id = model.add_hotel(hotel_name) return json({"hotel_id": hotel_id})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_new_arrival(self):\n pass", "def addHotspot( self, hotspot ):\n self._hotspots.append(hotspot)", "async def add_reservation_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n arrival_date = request.args[\"arrival_date\"][0...
[ "0.63080275", "0.5862244", "0.58347523", "0.5819879", "0.5525398", "0.5483701", "0.54169357", "0.5386112", "0.5373534", "0.52948624", "0.52928376", "0.5286119", "0.5283983", "0.5268902", "0.5244285", "0.52413", "0.5239886", "0.52386993", "0.5225297", "0.5223356", "0.52185214"...
0.67610687
0
Add inventory to a given hotel
async def add_inventory_endpoint(request): hotel_id = request.args["hotel_id"][0] room_type = request.args["room_type"][0] room_inventory = request.args["room_inventory"][0] model.add_inventory(hotel_id, room_type, room_inventory) return json({"success": True})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_to_inventory(self, newItem):\n\n if len(self.player_inventory) >= 8:\n print(\"\"\"You already have the maximum of 7 items in your inventory,\n looks like you will need to get rid of an item to get {}\"\"\".format(newItem.name))\n\n print(\"Would you like to get rid ...
[ "0.6268694", "0.6268613", "0.6260081", "0.6259757", "0.6220235", "0.62095225", "0.6174858", "0.6051948", "0.6041582", "0.6013446", "0.5951593", "0.59234303", "0.5922092", "0.5921586", "0.588076", "0.5787859", "0.5782276", "0.576559", "0.5759909", "0.5720281", "0.56820464", ...
0.6929253
0
Cancel an existing reservation
async def cancel_reservation_endpoint(request): reservation_id = request.args["reservation_id"][0] model.cancel_reservation(reservation_id) return json({"success": True})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cancel_reservation(payload, clothes_id):\n selection = Reserve.query.filter_by(clothes_id=clothes_id).all()\n # if the given clothes has not been reserved, abort 404\n if len(selection) == 0:\n abort(404)\n # if two or more user reserved the same clothe, abort umprocessab...
[ "0.7251533", "0.6900359", "0.6353264", "0.63135654", "0.6305536", "0.6305536", "0.6305536", "0.6251353", "0.6242757", "0.6192752", "0.61905634", "0.6167782", "0.6162976", "0.61334383", "0.6130032", "0.6127152", "0.61173344", "0.61173344", "0.6115619", "0.6088514", "0.60856", ...
0.80371726
0
Add a new reservation
async def add_reservation_endpoint(request): hotel_id = request.args["hotel_id"][0] room_type = request.args["room_type"][0] arrival_date = request.args["arrival_date"][0] departure_date = request.args["departure_date"][0] status = request.args["status"][0] reservation_id = model.add_reservation(hotel_id, room_type, arrival_date, departure_date, status) if reservation_id == model.OPERATION_ERROR_RETURN_CODE: return json({"success": False}) return json({"success": True, "reservation_id": reservation_id})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reservation_add(token_user):\n if not json_param_exists('team_id') or \\\n not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n team_id = ...
[ "0.75879747", "0.7112902", "0.6949954", "0.6692203", "0.650362", "0.63385224", "0.6337414", "0.62899226", "0.62474537", "0.61857814", "0.6139244", "0.61172265", "0.60497403", "0.60310566", "0.6028615", "0.6002983", "0.596607", "0.58925354", "0.5867518", "0.5861634", "0.582341...
0.75634503
1
Get an existing reservation
async def get_reservation_endpoint(request): reservation_id = request.args["reservation_id"][0] reservation_dict = model.get_reservation(reservation_id) return json(reservation_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reservation(self):\n return self.request.get('reservation', None)", "def reservation(self):\n return self._reservation", "def reservation_read(token_user, res_id):\n res = Reservation.query.get(res_id)\n if res is None:\n abort(404, 'reservation not found')\n\n return json.dum...
[ "0.7379199", "0.71298105", "0.70966446", "0.70289004", "0.6932207", "0.68829936", "0.66138154", "0.65487725", "0.64873415", "0.64082396", "0.6286343", "0.628403", "0.62172484", "0.6214408", "0.61116004", "0.6033761", "0.60133725", "0.5996568", "0.59386927", "0.5936628", "0.59...
0.7394642
0
List the inventory of a hotel in a specific date range
async def list_inventory_endpoint(request): hotel_id = request.args["hotel_id"][0] start_date = request.args["start_date"][0] end_date = request.args["end_date"][0] inventory = model.list_inventory(hotel_id, start_date, end_date) if inventory == model.OPERATION_ERROR_RETURN_CODE: return json({"success": False}) return json({"success": True, "inventory": inventory})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inventory(request, concierge, template=\"concierges/inventory_check.html\"):\n inventory = []\n for x in xrange(0, 2):\n day = date.today() + timedelta(days=x)\n dow = DayOfWeek.objects.get(isoweekday=day.isoweekday())\n day_info = {'day': day, 'times': []}\n schedules = dow.t...
[ "0.5880754", "0.55358046", "0.55146176", "0.54974526", "0.54716426", "0.54641354", "0.5431443", "0.5413283", "0.5400029", "0.53998613", "0.5383372", "0.5372556", "0.5365455", "0.5363467", "0.5363467", "0.5358362", "0.5348541", "0.53345364", "0.5328369", "0.53199214", "0.53068...
0.71391815
0
Hamming Heuristic (admissible) provides very little speedup, but it's one line and admissible
def spotlessroomba_first_heuristic(state : SpotlessRoombaState) -> float: # TODO a nontrivial admissible heuristic return len(state.dirty_locations)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def chk_hamming(data):\n pass", "def hamming_algorithm(data: bytearray):\n print(f\"data: {data}\")\n # 12345678 12345678 12345678 12345678\n if len(data) % 4 != 0:\n diff = 4 - len(data) % 4\n data += bytes(diff)\n m = len(data)\n r = 0\n chunck = 0\n i = 0\n ret_...
[ "0.75875574", "0.6815475", "0.68022764", "0.626976", "0.62238514", "0.6208715", "0.6082887", "0.60824615", "0.6025382", "0.60161185", "0.5995922", "0.5990126", "0.5911016", "0.5893186", "0.5888975", "0.5871593", "0.58487594", "0.58374834", "0.5815022", "0.5806226", "0.5787427...
0.0
-1
Best Path Heuristic (consistent) (seems to be a very good heuristic) Gives the roomba the ability to pass through walls and ignore additional cost on carpet 1. Find which dirty tile is best to start from For each dirty tile in state.dirty_locations 1.1 Set it as the start node 1.2 Use Total Manhattan Distance(third heuristic) to find route of least cost to visit every other dirty tile 1.3 Compare with previous start tile, and keep the better start (tiebreak with roomba proximity to start tile) 2. Find roomba proximity to the best start tile 3. Add the results of steps 1 and 2 The heuristic is the sum of the distance to the best start tile and the cost from said tile
def spotlessroomba_second_heuristic(state : SpotlessRoombaState) -> float: # TODO a nontrivial consistent heuristic if not state.dirty_locations: return 0 best_start = 0 # best dirty tile to start from best_cost = INF # cost of the path from the above start tile for i in range(len(state.dirty_locations)): estimate_cost = 0 lowest_cost = INF closest_dirty = 0 dirty_locations = list(state.dirty_locations) current_pos = dirty_locations.pop(i) # find the shortest cost solution path from this starting tile while dirty_locations: for j in range(len(dirty_locations)): manhattan = abs(current_pos.row - dirty_locations[j].row) + abs(current_pos.col - dirty_locations[j].col) if manhattan < lowest_cost: lowest_cost = manhattan closest_dirty = j estimate_cost += lowest_cost current_pos = dirty_locations.pop(closest_dirty) lowest_cost = INF # if estimated path cost is cheaper than best path cost so far, replace best_cost and best_start if estimate_cost < best_cost: best_cost = estimate_cost best_start = i # if estimated path cost and best path cost so far are equal, tiebreak with proximity to start tile if estimate_cost == best_cost: current_pos = state.position dist_to_prev_best = abs(current_pos.row - state.dirty_locations[best_start].row) + abs(current_pos.col - state.dirty_locations[best_start].col) dist_to_i = abs(current_pos.row - state.dirty_locations[i].row) + abs(current_pos.col - state.dirty_locations[i].col) if dist_to_i < dist_to_prev_best: best_start = i current_pos = state.position # Calculate distance to the best start tile dist_to_start = abs(current_pos.row - state.dirty_locations[best_start].row) + abs(current_pos.col - state.dirty_locations[best_start].col) # Returned heuristic is the sum of distance to the start tile and estimated cost from said tile return dist_to_start + best_cost
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_heuristic(self, state):\n\n def get_manhattan_distance(coord_a, coord_b):\n \"\"\"Returns the manhattan distance between coord_a and coord_b.\"\"\"\n return abs(coord_a.x - coord_b.x) + abs(coord_a.y - coord_b.y)\n\n \n def get_num_obstacles(coord_a, coord_b):\n ...
[ "0.7437132", "0.7243507", "0.7075924", "0.7064572", "0.6833929", "0.6753228", "0.6737725", "0.64962655", "0.6481526", "0.6475066", "0.6400677", "0.63954955", "0.63701344", "0.6343227", "0.63267416", "0.63048136", "0.6302781", "0.6302682", "0.6297269", "0.6265315", "0.62526554...
0.8600155
0
Total Manhattan Distance Heuristic (neither admissible nor consistent) (this heuristic is included moreso to show the idea Best Path is based, but it is often more effective than Hamming even if it isn't admissible) Gives the roomba the ability to pass through walls and ignore additional cost on carpet 1. Find closest dirty tile in manhattan distance 2. Move roomba to closest dirty tile 3. Repeat 12 until all dirty tiles are clean The heuristic is the total manhattan distance if the roomba moves to the closest dirty tile every time.
def spotlessroomba_third_heuristic(state : SpotlessRoombaState) -> float: h = 0 current_position = state.position dirty_locations = list(state.dirty_locations) partial_heuristic = INF closest_dirty = 0 while dirty_locations: for i in range(len(dirty_locations)): manhattan = abs(current_position.row - dirty_locations[i].row) + abs(current_position.col - dirty_locations[i].col) if manhattan < partial_heuristic: partial_heuristic = manhattan closest_dirty = i h += partial_heuristic current_position = dirty_locations.pop(closest_dirty) partial_heuristic = INF return h
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def manhattan_distance(self):\n dist = 0\n for target, tile in zip(self.winCdt[:-1], self.tiles[:-1]):\n dist += abs(target[0] - tile[0]) + abs(target[1] - tile[1])\n return dist", "def calculate_manhattan_dist(self):\n return self._current_cost + abs(self._current_loc.get_...
[ "0.7025016", "0.7011789", "0.6967061", "0.6955104", "0.69499224", "0.69179034", "0.6900574", "0.6883391", "0.68710554", "0.677832", "0.65591604", "0.6524315", "0.65080816", "0.65063334", "0.6491734", "0.6422433", "0.6416312", "0.6414841", "0.63929284", "0.63912034", "0.638851...
0.66257834
10
Create a new description tag Descriptions are always required, never hidden, and never limited.
def __init__(self, *args): super().__init__('description', *args, required=True, hidden=False, limit=-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_descr(self, attr_name):", "def description(self, newDescription=None):\n pass", "def add_description(self, description):\n self.add_metric('descript', description)", "def add_description(self, desc):\n self.description = desc", "def description(self, description: str):\n ...
[ "0.695261", "0.69262964", "0.67537904", "0.6662091", "0.6635096", "0.6542747", "0.64898026", "0.64358616", "0.64276004", "0.64130193", "0.6404517", "0.63842374", "0.63729984", "0.63729984", "0.63729984", "0.63729984", "0.6369401", "0.635684", "0.63517755", "0.63242537", "0.63...
0.65792376
5
Generate the header string for this description If the description is empty, return an empty string. Otherwise, the raw data is joined together and returned with no '' components.
def to_header(self): if not self.filled: return '' return "\n".join(self.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _header_string( self, title='title' ): \n return_str = ''\n return_str += '{}\\n\\n'.format( title )\n return_str += '{} atoms\\n'.format( len(self.atoms) )\n if len(self.bonds) != 0:\n return_str += '{} bonds\\n\\n'.format( len(self.bonds) )\n return_str += '...
[ "0.73597187", "0.72831184", "0.71944606", "0.7154044", "0.6919942", "0.6901166", "0.6901166", "0.675267", "0.66368526", "0.66322434", "0.66226566", "0.65845186", "0.658279", "0.658279", "0.6578081", "0.6538257", "0.6538257", "0.6524499", "0.65011746", "0.641547", "0.63942367"...
0.7559777
0
Reads CSV data on initialization.
def initialize(self, ctx): super().initialize(ctx) self._csv_reader = CsvReader() self._csv_reader.data = ctx.interpolate(self.data) self._csv_reader.strip = True ctx.comp.initialize(self._csv_reader) for m in self._csv_reader.process(ctx, None): self.insert(ctx, m)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def read_csv():", "def rea...
[ "0.7757678", "0.7520253", "0.74909085", "0.7298727", "0.72604567", "0.7150921", "0.7114749", "0.69202036", "0.69161856", "0.69122183", "0.68653274", "0.6864344", "0.6778561", "0.66752946", "0.6599824", "0.6594454", "0.6570179", "0.6554265", "0.6542523", "0.6483472", "0.645528...
0.65762395
16
x_max = max(seq) x_min = min(seq) epilson = 1e6 new_seq = [10000 (epilson + x x_min )/(epilson + x_max x_min) for x in seq]
def normalization(seq): new_seq = [6.3578286171 * x for x in seq] return new_seq
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def geo_seq(val, ratio, length):\n return [val * pow(ratio, i) for i in range(length)]", "def power_e(amount, start, stop, truncated, sequence):\n ratio = .5\n for x in range(start, amount):\n y = abs(round(ratio * math.exp(x)))\n if truncated and y >= stop:\n ...
[ "0.6177466", "0.61661714", "0.5975783", "0.5779353", "0.56950027", "0.56275076", "0.5592511", "0.5567296", "0.5561729", "0.54725146", "0.5414097", "0.5412501", "0.54114294", "0.53760564", "0.5344241", "0.5343695", "0.53412575", "0.53345144", "0.5287912", "0.5285122", "0.52791...
0.6463649
0
Export flat list fo file
def save_list(list_data, path, lineterminator='\n', encoding=None, mode='w'): with open(path, mode) as f: list_data = [item + lineterminator for item in list_data] if encoding is not None: list_data = [item.encode(encoding) for item in list_data] f.writelines(list_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(lst):\n # TODO", "def write_list(self):\n with open(self.path, 'w') as file:\n for i in map(self.addziros, range(1, int(str(1) + self.number_length * '0') + 1)):\n file.write(i + '\\n')\n file.close()", "def export(tako_list, filename):\n for tak in t...
[ "0.69543386", "0.6891831", "0.6824986", "0.6617422", "0.65862507", "0.65442276", "0.65424216", "0.64962", "0.64938307", "0.6466383", "0.6341536", "0.6341411", "0.6335272", "0.63172203", "0.628588", "0.6272017", "0.6266411", "0.6204448", "0.6160987", "0.61450326", "0.6117469",...
0.582233
55
Export flat list fo file using csv
def csv_save_list(list_data, path, lineterminator='\n', encoding=None): with open(path, 'w') as f: writer = csv.writer(f, lineterminator=lineterminator) for item in list_data: if encoding is not None: writer.writerow([item.encode(encoding)]) else: writer.writerow([item])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_file(data,output_dir,filename,order = [],head = True):\n with open(output_dir + filename + '.csv', 'w') as f:\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n return None", "def write_csv(list_file, path):\n\n\twith open(path, 'w') as f:\n\t\twr...
[ "0.7381743", "0.72397584", "0.720624", "0.7086666", "0.7081523", "0.70391846", "0.6996409", "0.6985795", "0.69504803", "0.6934752", "0.691903", "0.68434834", "0.6836178", "0.6823749", "0.681523", "0.68104255", "0.6807932", "0.6802946", "0.68010736", "0.67903537", "0.677872", ...
0.6799009
19
Export list of list fo file using csv
def save_list_of_list(data, path, lineterminator='\n', encoding=None): with open(path, 'w') as f: writer = csv.writer(f, lineterminator=lineterminator) if encoding is not None: data = [[item.encoding(encoding) for item in items] for items in data] writer.writerows(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_csv(list_file, path):\n\n\twith open(path, 'w') as f:\n\t\twriter = csv.writer(f, delimiter=',')\n\t\tfor i in list_file:\n\t\t\twriter.writerow(i)", "def persist_list_to_csv(liste, nom_fichier):\n with open(nom_fichier, 'w') as f:\n for elem in liste :\n f.write(\"{}\\n\".format(e...
[ "0.7663665", "0.75958973", "0.74424946", "0.7424865", "0.7408895", "0.7338322", "0.7334891", "0.73170066", "0.72265005", "0.71845925", "0.7141257", "0.7123623", "0.71039116", "0.70734674", "0.70368683", "0.7021018", "0.6982247", "0.6972423", "0.6959962", "0.69387776", "0.6937...
0.6904354
23
Write array to a file as text or binary (default).
def quick_save_array(data, file_name, delimiter=',', ): data.tofile(file_name, sep=delimiter)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_txt(data, file_path):\n array = sanitise_array(data)\n\n # If the data is floating then format the values in scientific notation.\n if np.issubdtype(array.dtype, np.floating):\n array = array.astype(np.float32)\n formatter = lambda x: f'{x:.12E}'\n elif np.issubdtype(array.dtype,...
[ "0.7022295", "0.66680944", "0.6556973", "0.65567935", "0.6454246", "0.64344746", "0.6400705", "0.6375197", "0.6342715", "0.62929696", "0.6282288", "0.62203467", "0.61905587", "0.61702776", "0.611481", "0.61135733", "0.6070423", "0.6067515", "0.6062281", "0.60030717", "0.59931...
0.6797532
1
Get the data from the dataset and apply preprocessing to extract hours.
def get_data(file_name): csv_file = open(file_name, 'rb') train_content = csv.reader(csv_file) # ignore header train_content.next() # preprocessing functions for each column index # Several preprocessing can be defined for each column. # A new variable is associated to EACH preprocessing function preproc_funcs = {0: ['get_hour']} # Read data from file, store it as an integer data = [] for row in train_content: data_row = [] for n, col in enumerate(row): # if the current column requires preprocessing functions, apply them if preproc_funcs.has_key(n): # Each preprocessing give a new column for preproc_func in preproc_funcs[n]: func = globals().get(preproc_func) data_row.append(int(float(func(col)))) # If no preprocessing, do nothing else: data_row.append(int(float(col))) data.append(data_row) csv_file.close() return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_preprocessing(dataset):\r\n df = pd.read_csv(dataset)\r\n df.head()\r\n df.describe()\r\n df.isnull().sum()\r\n df= df.drop(['instant'], axis=1)\r\n df['dteday'] = pd.to_datetime(df['dteday'].apply(str) + ' ' + df['hr'].apply(str) + ':00:00')\r\n return df"...
[ "0.6901085", "0.6247725", "0.6184876", "0.60731816", "0.6018868", "0.59925467", "0.59080493", "0.5836418", "0.581456", "0.57886934", "0.5735579", "0.5710588", "0.5708006", "0.56987065", "0.5696986", "0.5620522", "0.5603038", "0.55995053", "0.5593089", "0.5587772", "0.5583394"...
0.5502701
29
Get the datetimes from the excel file
def get_datetimes(file_name): csv_file = open(file_name, 'rb') file_content = csv.reader(csv_file) # ignore header file_content.next() datetimes = [] for row in file_content: datetimes.append(row[0]) csv_file.close() return datetimes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dates(file,start,end):\r\n \r\n data = format_data(file)\r\n data = data.loc[start:end,:] \r\n dates = list(data.index)\r\n \r\n return dates", "def read_hours_from_worksheet(sheet_name):\n workbook_path = get_workbook_path()\n wb = openpyxl.load_workbook(workbook_path)\n ws = ...
[ "0.6484449", "0.5983393", "0.59567344", "0.5858225", "0.5803949", "0.5792485", "0.5662788", "0.56464463", "0.5638216", "0.5618037", "0.5579476", "0.5578673", "0.55383044", "0.5533895", "0.5525505", "0.55171365", "0.55086786", "0.54955643", "0.54917186", "0.5489937", "0.548516...
0.63838965
1
Write the predictions in the filename according to Kaggle format.
def write_predictions(pred, filename="pred.csv"): output_file = open(filename, "wb") writer = csv.writer(output_file) datetimes = get_datetimes("test.csv") writer.writerow(["datetime", "count"]) for index, count in enumerate(pred): writer.writerow([datetimes[index], int(count)]) output_file.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_pred_kaggle_file(cls, outfname, speech):\n yp = cls.predict(speech.test_doc_vec)\n labels = speech.le.inverse_transform(yp)\n f = codecs.open(outfname, 'w')\n f.write(\"FileIndex,Category\\n\")\n for i in range(len(speech.test_fnames)):\n fname = speech.test_fnames[i]\n f.wri...
[ "0.79836786", "0.7524809", "0.7184881", "0.7017586", "0.7017586", "0.7015609", "0.6925001", "0.68141526", "0.67422944", "0.6684592", "0.6629441", "0.6625324", "0.66196924", "0.66170734", "0.6532637", "0.65058", "0.65011615", "0.64889866", "0.64260775", "0.63849616", "0.635989...
0.6097874
38
Return RMSLE from the prediction and the expected answer.
def get_RMSLE(pred, truth): assert len(pred) == len(truth) diff_vect = np.log(pred + 1) - np.log(truth + 1) diff_sum = np.sum(np.power(diff_vect, 2)) return np.sqrt(diff_sum / len(pred))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rmsle(actual, predicted, *args, **kwargs):\n return np.sqrt(msle(actual, predicted))", "def RMSLE(prediction, real):\n logarithmic_error = np.log1p(prediction) - np.log1p(real)\n score = np.sqrt(1/len(real) *np.sum(logarithmic_error**2))\n return score", "def rmsle(y_true, y_pred):\n assert ...
[ "0.7609951", "0.7355589", "0.7203462", "0.718389", "0.6820581", "0.64459103", "0.6406715", "0.6388028", "0.6317563", "0.6281459", "0.6191574", "0.60744154", "0.6051396", "0.6048326", "0.60155445", "0.59951115", "0.59895235", "0.5938883", "0.59192955", "0.5914614", "0.5897343"...
0.7670679
0
Return the path of the Bohrium systemwide configuration file
def config_file(self): return join_path(self.prefix.etc.bohrium, "config.ini")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getConfigPath():\n if sys.platform == 'linux':\n configpath = os.path.normpath(os.path.expanduser('~/.config/phobos'))\n elif sys.platform == 'darwin':\n configpath = os.path.normpath(os.path.expanduser('~/Library/Application Support/phobos'))\n elif sys.platform == 'win32':\n con...
[ "0.7627292", "0.7395096", "0.73803973", "0.72431105", "0.71532726", "0.7132535", "0.71078205", "0.7089271", "0.70785385", "0.70739216", "0.7015473", "0.7002854", "0.69692624", "0.6962831", "0.6919271", "0.6863968", "0.68277985", "0.6817259", "0.6779527", "0.675732", "0.675009...
0.8231032
0
Create a new model from raw data, like word frequencies, Brown clusters and word vectors.
def init_model(lang, output_dir, freqs_loc=None, clusters_loc=None, vectors_loc=None, prune_vectors=-1):
    """Create a new model from raw data (word frequencies, Brown clusters
    and word vectors) and serialize it to ``output_dir``.

    :param lang: language code for the model.
    :param output_dir: directory the model is written to (created if absent).
    :param freqs_loc: optional path to word frequency counts.
    :param clusters_loc: optional path to Brown cluster data.
    :param vectors_loc: optional path to word vectors.
    :param prune_vectors: number of vectors to keep (-1 keeps all).
    :return: the created nlp object.
    """
    if freqs_loc is not None and not freqs_loc.exists():
        prints(freqs_loc, title=Messages.M037, exits=1)
    clusters_loc = ensure_path(clusters_loc)
    vectors_loc = ensure_path(vectors_loc)
    if freqs_loc is not None:
        probs, oov_prob = read_freqs(freqs_loc)
    else:
        # Fall back to an empty table with a default OOV log probability.
        probs, oov_prob = {}, -20
    if vectors_loc:
        vectors_data, vector_keys = read_vectors(vectors_loc)
    else:
        vectors_data, vector_keys = None, None
    clusters = read_clusters(clusters_loc) if clusters_loc else {}
    nlp = create_model(lang, probs, oov_prob, clusters, vectors_data, vector_keys, prune_vectors)
    if not output_dir.exists():
        output_dir.mkdir()
    nlp.to_disk(output_dir)
    return nlp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_model(self, documents):\n self.vectorizer = TfidfVectorizer(\n stop_words='english', lowercase=True).fit(documents)\n self.vectors = self.vectorizer.transform(documents)", "def build_model_from_inputs(self):\n if self.term_list is None:\n # no supplied token l...
[ "0.6835612", "0.6641352", "0.66403395", "0.6555825", "0.6487551", "0.6304003", "0.6169526", "0.61496544", "0.6141949", "0.61404395", "0.6100387", "0.6087156", "0.606636", "0.6064309", "0.60637945", "0.60066015", "0.5956803", "0.5955455", "0.59401214", "0.592966", "0.5922185",...
0.0
-1
Generate a list of ranks that get harder to obtain as they approach the maximum
def generate_ranks(maximum: int, steps: int) -> List[int]:
    """Generate a list of rank cutoffs that get harder to obtain as they
    approach the maximum.

    :param maximum: highest cutoff value.
    :param steps: number of cutoffs to generate.
    :return: cutoffs in ascending order (each 75% of the next, truncated).
    """
    cutoffs = []
    threshold = maximum
    for _ in range(steps):
        cutoffs.append(threshold)
        threshold = int(threshold * 0.75)
    cutoffs.reverse()
    return cutoffs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findRanks(toBeRanked, values):\n\treturn list(map(lambda e: findRank(e, values), toBeRanked))", "def resolveTie(self, hand_ranking, tie_list):\n max_rank_list = [] \n\n for i in range(5):\n \"\"\" Lowest rank card as baseline \"\"\"\n curr_max_rank = 0 \n for pl...
[ "0.673976", "0.67047715", "0.6614033", "0.6538433", "0.6468819", "0.63788843", "0.63333786", "0.63292336", "0.62728506", "0.62722045", "0.62637275", "0.6252744", "0.6246774", "0.6220911", "0.621737", "0.6209077", "0.618397", "0.6162317", "0.6145225", "0.6116494", "0.61029476"...
0.76253545
0
Get the rank for a given number of points
def get_rank(points: int, cutoffs: List[int]) -> int:
    """Get the rank for a given number of points.

    :param points: points scored.
    :param cutoffs: ascending rank cutoffs.
    :return: index of the last cutoff at or below ``points`` (0 when below
        every cutoff); the top rank when every cutoff is reached.
    """
    rank = 0
    for index, cutoff in enumerate(cutoffs):
        if points >= cutoff:
            continue
        # First cutoff above ``points``: below the lowest cutoff the rank
        # stays 0, otherwise it is the previous cutoff's index.
        if index > 0:
            rank = index - 1
        break
    else:
        # Every cutoff reached: award the maximum rank.
        rank = RANK_COUNT - 1
    return rank
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rank(self, points):\n sql_command = \"SELECT * FROM points WHERE amount > ?;\"\n cursor, connection = self.execute_command_get_connection(sql_command, [points])\n\n all = cursor.fetchall()\n cursor.close()\n connection.close()\n return len(all) + 1", "def rank():...
[ "0.77205133", "0.7398081", "0.7045258", "0.70051664", "0.6996832", "0.6941575", "0.6934668", "0.6901205", "0.6886502", "0.6873956", "0.68593204", "0.68586534", "0.6857211", "0.684011", "0.68172395", "0.68114007", "0.68016917", "0.6763181", "0.6714917", "0.6704349", "0.6667565...
0.76279044
1
Yield successive n-sized chunks from l.
def chunks(l, n):
    """Yield successive n-sized chunks from l.

    The final chunk may be shorter when ``len(l)`` is not a multiple of n.
    """
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _chunk(self, l, n):\n for i in range(0, len(l) + 1, n):\n yield l[i:i + n]", "def chunks(self, l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def __chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]", "def get_chunks(self, ...
[ "0.8038751", "0.7924752", "0.7923082", "0.78850657", "0.78771824", "0.7815519", "0.77652335", "0.775546", "0.77440816", "0.77313656", "0.77284646", "0.77244157", "0.770246", "0.76888484", "0.76888484", "0.766384", "0.7656689", "0.7655605", "0.7655605", "0.76455575", "0.764555...
0.7537613
27
Check if at top or bottom and move target
def update_target(self):
    """Advance the target one frame and repaint it.

    Edge checks run first so the direction flips before the move is
    applied, then the filled rectangle is redrawn at the new position.
    """
    # Flip direction when touching the top or bottom screen edge.
    self.check_top()
    self.check_bottom()
    # Apply this frame's vertical movement.
    self.update()
    # Repaint the target at its (possibly new) position.
    self.screen.fill(self.target_color, self.rect)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_top(self):\n\t\tif self.rect.top <=0:\n\t\t\tself.target_direction = 1", "def move_towards(self, target_x, target_y, game_map, entities):\n path = game_map.compute_path(self.x, self.y, target_x, target_y)\n\n dx = path[0][0] - self.x\n dy = path[0][1] - self.y\n\n if game_m...
[ "0.7896249", "0.6593043", "0.64728004", "0.6443826", "0.64085305", "0.6375471", "0.6318698", "0.6313725", "0.6297276", "0.6276037", "0.6267289", "0.62628496", "0.62597704", "0.6249121", "0.62366766", "0.62253517", "0.61971354", "0.61686295", "0.6122896", "0.6106136", "0.61015...
0.6234633
15
moves the target further out as a % of the screen
def move_target(self, distance_adjustment):
    """Place the target horizontally at a fraction of the screen width.

    :param distance_adjustment: fraction (0..1) of the rightmost position
        at which the target's left edge is placed.
    """
    rightmost_x = float(self.screen_rect.right - self.width)
    self.x = rightmost_x * distance_adjustment
    self.rect.x = self.x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assign_upLimit():\r\n player.rect.y = 25", "def assign_downLimit():\r\n player.rect.y = 100", "def update_target(self):\n\t\tself.check_top()\n\t\tself.check_bottom()\n\t\tself.update()\n\t\tself.screen.fill(self.target_color, self.rect)", "def move(self):\n if random.random() < 0.5:\n ...
[ "0.66254646", "0.65339065", "0.65005153", "0.64133626", "0.6320612", "0.6276562", "0.6270425", "0.62666744", "0.62145644", "0.6178224", "0.6135446", "0.61301184", "0.6129795", "0.61164856", "0.6073802", "0.6068467", "0.6010894", "0.6008223", "0.5989418", "0.5989335", "0.59844...
0.68412036
0
Checks the target to see if it hit the top of the screen
def check_top(self):
    """Send the target moving downward when it hits the top of the screen."""
    hit_top = self.rect.top <= 0
    if hit_top:
        # 1 means downward movement for ``target_direction``.
        self.target_direction = 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_autos_top(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tfor auto in self.autos.sprites():\n\t\t\tif auto.rect.top <= screen_rect.top:\n\t\t\t\t# Treat this the same as if the pigeon got hit.\n\t\t\t\tself._pigeon_hit()\n\t\t\t\tbreak", "def hits_top_or_bottom(self):\n if self.y >= self....
[ "0.733528", "0.73351", "0.6623284", "0.6466618", "0.6390728", "0.62321746", "0.6225023", "0.62215257", "0.6199803", "0.61606985", "0.6129186", "0.610839", "0.6086093", "0.6061291", "0.6013783", "0.598041", "0.5974214", "0.5962553", "0.58782667", "0.58218175", "0.58168215", ...
0.8144298
0
Move the target up and down
def update(self):
    """Move the target vertically one speed step in the current direction."""
    step = self.settings.target_speed * self.target_direction
    self.y = self.y + step
    # Keep the drawing rect in sync with the float position.
    self.rect.y = self.y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def up(self):\n self.move(0, 1)", "def move_up(self):\n self.move_step(-1)", "def up(self):\n self.forward(MOVE_DISTANCE)", "def do_up(self, arg):\r\n moveDirection('up')", "def move_up(self):\n self.move_measurement(-1)", "def move_up(self):\r\n if self.rect.top...
[ "0.7641986", "0.75890297", "0.75058055", "0.7483884", "0.7452509", "0.7446572", "0.73249847", "0.7263192", "0.71093076", "0.7087285", "0.70684254", "0.7036653", "0.7012248", "0.6975011", "0.69614774", "0.6866236", "0.6853183", "0.6853097", "0.68411994", "0.68393564", "0.68263...
0.6171909
100
Start random direction movement
def go(self):
    """Launch the object in a random direction: always upward, with the
    horizontal component randomly flipped left or right, and play the
    explosion sound.
    """
    # Horizontal speed 2-3, vertical speed 1-2 (upward, hence negated).
    horizontal = random.randrange(2, 4)
    vertical = random.randrange(1, 3)
    # Coin flip decides left vs right.
    if not bool(random.getrandbits(1)):
        horizontal = -horizontal
    self.velocity = [horizontal, -vertical]
    self.explode.play()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setRandDirection(self):\n phi = 2*math.pi*random.random()\n u = 2*random.random() - 1\n v = math.sqrt(1-u*u)*math.cos(phi)\n w = math.sqrt(1-u*u)*math.sin(phi)\n self.direction = (u,v,w)", "def random_move(turtle, distance):\n angle = uniform(-90,90)\n d = uniform(0,dista...
[ "0.76126647", "0.7555828", "0.7380981", "0.7263461", "0.71452343", "0.7137276", "0.69984514", "0.6989068", "0.6987881", "0.69832087", "0.6966477", "0.6943634", "0.6943634", "0.6928557", "0.6794044", "0.67522436", "0.6749555", "0.6610872", "0.6600256", "0.6551586", "0.6535305"...
0.6732395
17
Change movement speed of the actor
def change_velocity(self, delta):
    """Change the movement speed of the actor by adding ``delta``.

    Uses augmented assignment: for a numeric ``velocity`` this is plain
    addition; if ``velocity`` were a mutable sequence, ``+=`` would extend
    it in place — presumably it is numeric here, but confirm at call sites.
    """
    self.velocity += delta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(self):\n self.position += self.speed", "def set_speed(self, new_speed):\n self.__x_speed, self.__y_speed = new_speed", "def movementSpeedModifier(self):\n return 0", "def walk(self):\n self.speed = self.speed + (0.2 * self.legs)", "def move_turtle(self):\n self.f...
[ "0.7516971", "0.7302032", "0.7290268", "0.7254367", "0.7236787", "0.7232281", "0.7176142", "0.7162881", "0.7147804", "0.7146328", "0.7095998", "0.7064958", "0.7046144", "0.69934225", "0.69770086", "0.6971563", "0.6969466", "0.69632304", "0.69601864", "0.6959585", "0.6946217",...
0.6720854
37
Change the position of the agent to show movement
def update_position(self):
    """Advance the agent's position by its velocity (x axis, then y axis)."""
    for axis in (0, 1):
        self.position[axis] += self.velocity[axis]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(self):\n \n self.position = self.wander()", "def movement(self):", "def move(self):\n possible_steps = self.model.grid.get_neighborhood(\n self.pos,\n moore=False, # implements Von Neumann neighborhood\n include_center=False)\n new_position = s...
[ "0.7571383", "0.7284361", "0.7184241", "0.7150989", "0.71362656", "0.7057185", "0.7012484", "0.6970319", "0.6948752", "0.688544", "0.6796978", "0.6755326", "0.674048", "0.6710682", "0.6673721", "0.66386414", "0.66232866", "0.66104656", "0.6609451", "0.6608374", "0.6564065", ...
0.0
-1
Disappear on horizontal collision and bounce on vertical
def is_collided_vertical(self):
    """Bounce the ball off the top and bottom screen borders by negating
    its vertical velocity (y-axis check).

    NOTE(review): despite the name, this mutates ``velocity`` instead of
    returning a collision flag.
    """
    radius = config['globals']['BALL_RADIUS']
    hit_top = self.position[1] <= radius
    hit_bottom = self.position[1] >= config['globals']['HEIGHT'] + 1 - radius
    if hit_top or hit_bottom:
        self.velocity[1] *= -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_collide(self):\r\n if self.get_overlapping_sprites():\r\n self.dx = -self.dx", "def _alienCollide(self):\n for b in self._bolts:\n if self._ship != None and self._ship.collides(b):\n self._ship = None\n self._bolts = []\n ...
[ "0.7171883", "0.69743687", "0.6876112", "0.68310064", "0.6821696", "0.6743633", "0.6663485", "0.6639343", "0.6614661", "0.66114783", "0.6596486", "0.65865046", "0.65635604", "0.65623677", "0.6554627", "0.65210336", "0.6513835", "0.6488594", "0.6458591", "0.6441815", "0.643996...
0.61831194
41
Setup strategies to use by the validator. These strategies can be provided
def _using(*args, validator: "DictValidator") -> "DictValidator":
    """Attach strategies to the validator.

    Each positional argument is either a Strategy instance or a list of
    them; a strategy's concrete type decides which slot of the validator
    (sorting, filtering or printing) it fills.

    :raises CertumException: for any argument or element that is not a
        recognized strategy.
    :return: the validator with the strategies installed.
    """

    def install(val, strat) -> "DictValidator":
        # Dispatch on concrete strategy type to pick the validator slot.
        if isinstance(strat, SortingStrategy):
            val.sorting = strat
        elif isinstance(strat, FilteringStrategy):
            val.filtering = strat
        elif isinstance(strat, PrintingStrategy):
            val.printing = strat
        else:
            raise CertumException("The strategy provided for the validator is unknown.")
        return val

    for arg in args:
        if isinstance(arg, list):
            for strat in arg:
                validator = install(validator, strat)
        elif isinstance(arg, Strategy):
            validator = install(validator, arg)
        else:
            raise CertumException("The strategy provided for the validator is unknown.")
    return validator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_default_strategies(self, fleetmanager_strategy, transport_strategy, customer_strategy, directory_strategy,\n station_strategy):\n self.fleetmanager_strategy = load_class(fleetmanager_strategy)\n self.transport_strategy = load_class(transport_strategy)\n se...
[ "0.6532167", "0.5933181", "0.55383205", "0.5475055", "0.5461148", "0.54593635", "0.54482967", "0.5417319", "0.5391583", "0.53656155", "0.53485143", "0.5343126", "0.5342159", "0.5331955", "0.5308945", "0.53017586", "0.5301568", "0.5298491", "0.5298491", "0.5286031", "0.526086"...
0.60849404
1
Target a value following the path 'self.path' inside the dictionary.
def _target(path: List[Any], dictionary: Dict[str, Any]) -> Any: if not path: return dictionary current = dictionary for key in path: try: current = current[key] except KeyError as error: path = " -> ".join(path) raise CertumException(f"The path '{path}' doesn't exist") from error return current
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_path(self, key, value):\n return set_path(self, key, self.from_obj(value))", "def set(self, path, value):\n pth = self._path[:]\n pth.extend(stringify_keys(path))\n set_nested(self._request.session, pth, value)\n # self._value = get_nested_default(self._dct, self._path)...
[ "0.66520655", "0.618119", "0.6123845", "0.60922164", "0.60772496", "0.59788", "0.59782517", "0.595644", "0.5857021", "0.5857021", "0.5855367", "0.58048785", "0.57281256", "0.5721013", "0.5697208", "0.5649894", "0.5646482", "0.5641645", "0.564031", "0.5637099", "0.56196165", ...
0.5476351
28
Records the vote note, we drop down and need to commit this transaction manually since we need to read, compute, and then write a new value. This will not work with mysql ISAM tables, so if you are using mysql, it is highly recommended to change this table to InnoDB to support transactions using
def record_vote(request):
    """Record a rating vote for the item identified by ``request.POST['id']``.

    The transaction is committed manually because we read, compute, and
    then write a new aggregate value. This requires a transactional
    storage engine (for MySQL use InnoDB, not MyISAM, which ignores
    transactions).

    :param request: HTTP request carrying POST fields ``id`` and ``vote``.
    :return: HttpResponse with the new average or ``'error'``.
    """
    result = "success"
    try:
        key = request.POST['id']
        rating, created = Rating.objects.get_or_create(key=key)
        ip = request.META['REMOTE_ADDR']
        # One RatingEvent per (key, ip): a returning voter changes their
        # previous vote instead of being double-counted.
        event, newevent = RatingEvent.objects.get_or_create(key=key, ip=ip)
        if not newevent:
            event.is_changing = True
            event.old_value = event.value
        event.value = int(request.POST['vote'])
        rating.add_rating(event)
        rating.save()
        event.save()
        result = "%s/5 rating ( %s votes)" % (rating.avg_rating, rating.total_votes)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; roll back the read-modify-write on failure.
        transaction.rollback()
        result = 'error'
    else:
        transaction.commit()
    return HttpResponse(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self)->None:\n database.cursor.execute(\"INSERT INTO votes(question,user_id,value) VALUES(%s,%s,%s) RETURNING id\", (\n self.question,\n self.user,\n self.value\n ))\n super().save()", "def record_vote(self, obj, user, vote):\r\n if vote not i...
[ "0.65922546", "0.65765816", "0.6408348", "0.631297", "0.62766236", "0.61878365", "0.5687511", "0.5568119", "0.55055785", "0.54527164", "0.5442264", "0.5370488", "0.53575575", "0.5343209", "0.5315146", "0.5259986", "0.52259654", "0.5187528", "0.5185984", "0.5178056", "0.516777...
0.64803696
2
This is an ADMM solver for the (Latent variable) Single Graphical Lasso problem (SGL). If ``latent=False``, this function solves
def ADMM_SGL(S, lambda1, Omega_0, Theta_0=np.array([]), X_0=np.array([]), rho=1., max_iter=1000, tol=1e-7, rtol=1e-4,
             stopping_criterion='boyd', update_rho=True, verbose=False, measure=False, latent=False, mu1=None):
    """ADMM solver for the (latent variable) Single Graphical Lasso problem.

    Parameters
    ----------
    S : (p, p) ndarray
        Empirical covariance matrix; must be square and match ``Omega_0``.
    lambda1 : float
        Positive off-diagonal l1-penalty parameter.
    Omega_0 : (p, p) ndarray
        Starting point for the Omega variable.
    Theta_0, X_0 : ndarray, optional
        Starting points for Theta and the scaled dual variable X; default
        to ``Omega_0`` and zeros respectively.
    rho : float
        Positive ADMM penalization parameter.
    max_iter : int
        Iteration limit.
    tol, rtol : float
        Absolute and relative tolerances for the stopping criterion.
    stopping_criterion : {'boyd', 'kkt'}
        Residual-based (Boyd et al.) or KKT-based termination.
    update_rho : bool
        Adapt rho from the primal/dual residual ratio ('boyd' mode only).
    verbose, measure : bool
        Progress printing / per-iteration runtime measurement.
    latent : bool
        Solve the latent-variable variant; requires ``mu1 > 0``.
    mu1 : float or None
        Penalty on the low-rank component L (latent mode only).

    Returns
    -------
    sol : dict
        'Omega', 'Theta', 'X' (plus 'L' when ``latent``).
    info : dict
        'status' (plus 'runtime' and 'residual' arrays when ``measure``).

    NOTE(review): relies on helpers defined elsewhere in this module
    (``phiplus``, ``prox_od_1norm``, ``prox_rank_norm``,
    ``ADMM_stopping_criterion``, ``kkt_stopping_criterion``).
    """
    assert Omega_0.shape == S.shape
    assert S.shape[0] == S.shape[1]
    assert lambda1 > 0
    assert stopping_criterion in ["boyd", "kkt"]

    if latent:
        assert mu1 is not None
        assert mu1 > 0

    (p, p) = S.shape

    assert rho > 0, "ADMM penalization parameter must be positive."

    # initialize
    Omega_t = Omega_0.copy()

    if len(Theta_0) == 0:
        Theta_0 = Omega_0.copy()
    if len(X_0) == 0:
        X_0 = np.zeros((p, p))

    Theta_t = Theta_0.copy()
    L_t = np.zeros((p, p))
    X_t = X_0.copy()

    runtime = np.zeros(max_iter)
    residual = np.zeros(max_iter)
    status = ''

    if verbose:
        print("------------ADMM Algorithm for Single Graphical Lasso----------------")

        if stopping_criterion == 'boyd':
            hdr_fmt = "%4s\t%10s\t%10s\t%10s\t%10s"
            out_fmt = "%4d\t%10.4g\t%10.4g\t%10.4g\t%10.4g"
            print(hdr_fmt % ("iter", "r_t", "s_t", "eps_pri", "eps_dual"))
        elif stopping_criterion == 'kkt':
            hdr_fmt = "%4s\t%10s"
            out_fmt = "%4d\t%10.4g"
            print(hdr_fmt % ("iter", "kkt residual"))

    ##################################################################
    ### MAIN LOOP STARTS
    ##################################################################
    for iter_t in np.arange(max_iter):
        if measure:
            start = time.time()

        # Omega Update
        W_t = Theta_t - L_t - X_t - (1 / rho) * S
        eigD, eigQ = np.linalg.eigh(W_t)
        Omega_t_1 = Omega_t.copy()
        Omega_t = phiplus(beta=1 / rho, D=eigD, Q=eigQ)

        # Theta Update
        Theta_t = prox_od_1norm(Omega_t + L_t + X_t, (1 / rho) * lambda1)

        # L Update
        if latent:
            C_t = Theta_t - X_t - Omega_t
            # C_t = (C_t.T + C_t)/2
            eigD1, eigQ1 = np.linalg.eigh(C_t)
            L_t = prox_rank_norm(C_t, mu1/rho, D=eigD1, Q=eigQ1)

        # X Update
        X_t = X_t + Omega_t - Theta_t + L_t

        if measure:
            end = time.time()
            runtime[iter_t] = end - start

        # Stopping criterion
        if stopping_criterion == 'boyd':
            r_t, s_t, e_pri, e_dual = ADMM_stopping_criterion(Omega_t, Omega_t_1, Theta_t, L_t, X_t,
                                                              S, rho, tol, rtol, latent)

            # update rho
            if update_rho:
                if r_t >= 10*s_t:
                    rho_new = 2*rho
                elif s_t >= 10*r_t:
                    rho_new = 0.5*rho
                else:
                    rho_new = 1.*rho

                # rescale dual variables
                X_t = (rho/rho_new)*X_t
                rho = rho_new

            residual[iter_t] = max(r_t, s_t)

            if verbose:
                print(out_fmt % (iter_t, r_t, s_t, e_pri, e_dual))

            if (r_t <= e_pri) and (s_t <= e_dual):
                status = 'optimal'
                break

        elif stopping_criterion == 'kkt':
            eta_A = kkt_stopping_criterion(Omega_t, Theta_t, L_t, rho * X_t, S, lambda1, latent, mu1)
            residual[iter_t] = eta_A

            if verbose:
                print(out_fmt % (iter_t, eta_A))

            if eta_A <= tol:
                status = 'optimal'
                break

    ##################################################################
    ### MAIN LOOP FINISHED
    ##################################################################

    # retrieve status (partially optimal or max iter)
    if status != 'optimal':
        if stopping_criterion == 'boyd':
            if (r_t <= e_pri):
                status = 'primal optimal'
            elif (s_t <= e_dual):
                status = 'dual optimal'
            else:
                status = 'max iterations reached'
        else:
            status = 'max iterations reached'

    print(f"ADMM terminated after {iter_t+1} iterations with status: {status}.")

    ### CHECK FOR SYMMETRY
    if abs((Omega_t).T - Omega_t).max() > 1e-5:
        warnings.warn(f"Omega variable is not symmetric, largest deviation is {abs((Omega_t).T - Omega_t).max()}.")
    if abs((Theta_t).T - Theta_t).max() > 1e-5:
        warnings.warn(f"Theta variable is not symmetric, largest deviation is {abs((Theta_t).T - Theta_t).max()}.")
    if abs((L_t).T - L_t).max() > 1e-5:
        warnings.warn(f"L variable is not symmetric, largest deviation is {abs((L_t).T - L_t).max()}.")

    ### CHECK FOR POSDEF
    D = np.linalg.eigvalsh(Theta_t - L_t)
    if D.min() <= 0:
        print(
            f"WARNING: Theta (Theta - L resp.) is not positive definite. Solve to higher accuracy! (min EV is {D.min()})")

    if latent:
        D = np.linalg.eigvalsh(L_t)
        if D.min() < -1e-8:
            print(f"WARNING: L is not positive semidefinite. Solve to higher accuracy! (min EV is {D.min()})")

    if latent:
        sol = {'Omega': Omega_t, 'Theta': Theta_t, 'L': L_t, 'X': X_t}
    else:
        sol = {'Omega': Omega_t, 'Theta': Theta_t, 'X': X_t}

    if measure:
        info = {'status': status, 'runtime': runtime[:iter_t+1], 'residual': residual[:iter_t+1]}
    else:
        info = {'status': status}

    return sol, info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solver_mll(X, y, C, S, alpha=0.1, max_iter=1000, tol=1e-4, positive=False):\n n_tasks, n_samples, n_features = X.shape\n lasso = Lasso(alpha=alpha, fit_intercept=False, positive=positive)\n lasso_p = Lasso(alpha=alpha / n_tasks, fit_intercept=False,\n positive=True)\n old_theta =...
[ "0.5904229", "0.56168336", "0.5549464", "0.5510236", "0.54761505", "0.54374045", "0.5416033", "0.53865016", "0.5377905", "0.5372089", "0.53265786", "0.5315447", "0.52730715", "0.5262824", "0.52390164", "0.52042484", "0.5179361", "0.5165328", "0.51650435", "0.5147611", "0.5143...
0.65866923
0
This is a wrapper for solving SGL problems on connected components of the solution and solving each block separately. See Witten, Friedman, Simon "New Insights for the Graphical Lasso" for details. It solves
def block_SGL(S, lambda1, Omega_0, Theta_0=None, X_0=None, rho=1., max_iter=1000, tol=1e-7, rtol=1e-3,
              stopping_criterion="boyd", update_rho=True, verbose=False, measure=False):
    """Solve SGL block-wise over the connected components of the solution.

    Per Witten, Friedman, Simon, "New Insights for the Graphical Lasso",
    the SGL solution decomposes over the connected components obtained by
    thresholding ``S`` at ``lambda1``, so each block is solved separately
    with :func:`ADMM_SGL` (single-node components have a closed form),
    and the blocks are reassembled in the original variable ordering.

    Parameters mirror :func:`ADMM_SGL` (without the latent option).

    Returns
    -------
    sol : dict
        'Omega', 'Theta', 'X' in the original (p, p) ordering.

    NOTE(review): relies on ``get_connected_components``,
    ``invert_permutation`` and ``block_diag`` from this module's scope.
    """
    assert Omega_0.shape == S.shape
    assert S.shape[0] == S.shape[1]
    assert lambda1 > 0

    (p, p) = S.shape

    if Theta_0 is None:
        Theta_0 = Omega_0.copy()
    if X_0 is None:
        X_0 = np.zeros((p, p))

    # compute connected components of S with lambda_1 threshold
    numC, allC = get_connected_components(S, lambda1)

    allOmega = list()
    allTheta = list()
    allX = list()

    for i in range(numC):
        C = allC[i]

        # single node connected components have a closed form solution, see Witten, Friedman, Simon "NEW INSIGHTS FOR THE GRAPHICAL LASSO "
        if len(C) == 1:
            # we use the OFF-DIAGONAL l1-penalty, otherwise it would be 1/(S[C,C]+lambda1)
            closed_sol = 1 / (S[C, C])
            allOmega.append(closed_sol)
            allTheta.append(closed_sol)
            allX.append(np.array([0]))

        # else solve Graphical Lasso for the corresponding block
        else:
            block_S = S[np.ix_(C, C)]
            block_sol, block_info = ADMM_SGL(S=block_S, lambda1=lambda1, Omega_0=Omega_0[np.ix_(C, C)],
                                             Theta_0=Theta_0[np.ix_(C, C)], X_0=X_0[np.ix_(C, C)], tol=tol,
                                             rtol=rtol, stopping_criterion=stopping_criterion,
                                             update_rho=update_rho, rho=rho, max_iter=max_iter,
                                             verbose=verbose, measure=measure)
            allOmega.append(block_sol['Omega'])
            allTheta.append(block_sol['Theta'])
            allX.append(block_sol['X'])

    # compute inverse permutation
    per = np.hstack(allC)
    per1 = invert_permutation(per)

    # construct solution by applying inverse permutation indexing
    sol = dict()
    sol['Omega'] = block_diag(*allOmega)[np.ix_(per1, per1)]
    sol['Theta'] = block_diag(*allTheta)[np.ix_(per1, per1)]
    sol['X'] = block_diag(*allX)[np.ix_(per1, per1)]

    return sol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gel_solve(\n A,\n y,\n l_1,\n l_2,\n ns,\n b_init=None,\n block_solve_fun=block_solve_agd,\n block_solve_kwargs=None,\n max_cd_iters=None,\n rel_tol=1e-6,\n Cs=None,\n Is=None,\n verbose=False,\n):\n p = len(A)\n m = len(y)\n device = A[0].device\n dtype = A[0...
[ "0.6437674", "0.63575566", "0.63160455", "0.6253777", "0.6242397", "0.61891377", "0.6080901", "0.6075275", "0.60319287", "0.6008029", "0.5985438", "0.5956081", "0.5951685", "0.5910511", "0.5889266", "0.588267", "0.5834767", "0.58335656", "0.58294004", "0.5818063", "0.5789487"...
0.66944
0
The argument p is assumed to be some permutation of 0, 1, ..., len(p)-1. Returns an array s, where s[i] gives the index of i in p.
def invert_permutation(p):
    """Return the inverse of the permutation ``p``.

    ``p`` is assumed to be some permutation of 0, 1, ..., len(p)-1.
    The result ``s`` satisfies ``s[i] == index of i in p``.
    """
    inverse = np.empty_like(p)
    # Scatter positions: value p[i] sits at index i, so inverse[p[i]] = i.
    inverse[p] = np.arange(p.size)
    return inverse
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perm_invert(p):\n q = [None] * len(p)\n for i, j in enumerate(p):\n q[j] = i\n return q", "def sample_from(self, p):\n return np.searchsorted(np.cumsum(p), np.random.rand())", "def permute_2d(m, p):\r\n return m[p][:, p]\r\n # unused below\r\n m_t = transpose(m)\r\n r_t =...
[ "0.6614275", "0.5957587", "0.58996975", "0.5898854", "0.58252364", "0.574952", "0.57436204", "0.5726088", "0.57004726", "0.5664967", "0.5657052", "0.56268764", "0.55960995", "0.55443096", "0.55430853", "0.5538524", "0.550933", "0.55085063", "0.5503721", "0.5452929", "0.543458...
0.7273526
0
Initialize a HOOMD device given the parse arguments.
def make_hoomd_device(args):
    """Initialize a HOOMD device from the parsed arguments.

    :param args: namespace with ``device`` ('CPU' or 'GPU') and ``verbose``.
    :raises ValueError: when ``args.device`` names an unknown device.
    :return: the constructed hoomd device.
    """
    device_name = args.device
    if device_name not in ('CPU', 'GPU'):
        raise ValueError(f'Invalid device {device_name}.')
    device = hoomd.device.CPU() if device_name == 'CPU' else hoomd.device.GPU()
    if not args.verbose:
        # Silence device status messages unless verbose output is requested.
        device.notice_level = 0
    return device
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(self):\n self.ha_url = self.args.get(\"ha_url\", None)\n self.use_current_brightness = self.args.get(\"use_current_brightness\", False)\n self.condition = self.args.get(\"condition\")\n self.lights = self.args[\"lights\"]\n self.listen_state(self.change_lights_color, self.args[\"media...
[ "0.62113965", "0.608058", "0.59696805", "0.5943475", "0.59068257", "0.583213", "0.5805206", "0.5683287", "0.5648386", "0.56437355", "0.56389016", "0.56138045", "0.5604826", "0.5592298", "0.5573388", "0.55678135", "0.55641365", "0.5561535", "0.55565435", "0.5537574", "0.553698...
0.6631796
0
Override this method to initialize the simulation.
def make_simulation(self):
    """Override this method to initialize the simulation.

    The base implementation is a no-op placeholder; subclasses create the
    simulation here (presumably assigning ``self.sim``, which other
    methods read — confirm against subclass implementations).
    """
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_simulation(self) -> Simulation:\n pass", "def initialise_sim(self):\n pass", "def _setup_simulation(self\n ) -> None:\n pass", "def __init__(self, simulator):\r\n self.initialize(simulator)", "def setup_simulation(self, **kwargs):\n\n ...
[ "0.8976463", "0.8452447", "0.83930016", "0.7892265", "0.7457877", "0.73970324", "0.7365948", "0.73107266", "0.7242487", "0.71671295", "0.7144739", "0.7124746", "0.7005026", "0.6902338", "0.68967086", "0.68967086", "0.68967086", "0.68929464", "0.68929464", "0.68929464", "0.689...
0.774349
4
Get the performance of the benchmark during the last ``run``.
def get_performance(self):
    """Get the performance of the benchmark during the last ``run``.

    Returns the simulation's ``tps`` (timesteps per second).
    """
    return self.sim.tps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())", "def benchmark_result(self):\n return self._benchmark_id", "def retrieve( self, benchmark, extraLabel='' ):\n if benchmark.reference is ReferenceBenchmark.SP:\n idx = np.argmax(...
[ "0.72080225", "0.6449681", "0.6247628", "0.6000796", "0.5958153", "0.5957309", "0.59554595", "0.58821696", "0.5833112", "0.58005464", "0.5773313", "0.574518", "0.57411474", "0.57227314", "0.5712661", "0.56783533", "0.56588525", "0.5634061", "0.55844593", "0.5574638", "0.55643...
0.5574966
19
Run the benchmark for the given number of steps.
def run(self, steps):
    """Run the benchmark simulation for the given number of steps.

    :param steps: number of timesteps to advance the simulation.
    """
    self.sim.run(steps)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, steps = 1000):\n for step in range(steps):\n if self.is_done():\n return\n self.step()", "def run(self, steps=1000):\n for step in range(steps):\n if self.is_done():\n return\n self.step()", "def run(self, ste...
[ "0.6977534", "0.68559444", "0.68559444", "0.6314905", "0.6159092", "0.60951483", "0.603625", "0.59564173", "0.5941269", "0.59056836", "0.59001297", "0.5890866", "0.58668673", "0.58663905", "0.58295834", "0.5819737", "0.5806568", "0.5786354", "0.5773823", "0.5768865", "0.57342...
0.64317936
3
Execute the benchmark and report the performance.
def execute(self): print_verbose_messages = (self.verbose and self.device.communicator.rank == 0) # Ensure that all ops are attached (needed for is_tuning_complete). self.run(0) if print_verbose_messages: print(f'Running {type(self).__name__} benchmark') if print_verbose_messages: print(f'.. warming up for {self.warmup_steps} steps') self.run(self.warmup_steps) if (isinstance(self.device, hoomd.device.GPU) and hasattr(self.sim.operations, 'is_tuning_complete')): while not self.sim.operations.is_tuning_complete: if print_verbose_messages: print('.. autotuning GPU kernel parameters for ' f'{self.warmup_steps} steps') self.run(self.warmup_steps) if print_verbose_messages: print(f'.. running for {self.benchmark_steps} steps ' f'{self.repeat} time(s)') # benchmark performance = [] if isinstance(self.device, hoomd.device.GPU): with self.device.enable_profiling(): for i in range(self.repeat): self.run(self.benchmark_steps) performance.append(self.get_performance()) if print_verbose_messages: print(f'.. {performance[-1]} {self.units}') else: for i in range(self.repeat): self.run(self.benchmark_steps) performance.append(self.get_performance()) if print_verbose_messages: print(f'.. {performance[-1]} {self.units}') return performance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())", "def _run_benchmark(self, params):\n logging.info('Running benchmark [%s]', self._get_name())\n params = benchmark_cnn.setup(params)\n bench = benchmark_cnn.BenchmarkCNN(params)\n be...
[ "0.7920896", "0.68559283", "0.67580193", "0.6716297", "0.66414857", "0.65965855", "0.6546384", "0.65423447", "0.65336007", "0.6459301", "0.6339556", "0.6312379", "0.62926507", "0.6286289", "0.623686", "0.622832", "0.61696357", "0.61670846", "0.6156982", "0.6118324", "0.611785...
0.7640157
1
Make an ArgumentParser instance for benchmark options.
def make_argument_parser():
    """Make an ArgumentParser instance for benchmark options.

    :return: parser accepting device, size, density, dimension, step-count
        and verbosity options with module-level defaults.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--device', type=str, choices=['CPU', 'GPU'], help='Execution device.', required=True)
    add('-N', type=int, default=DEFAULT_N, help='Number of particles.')
    add('--rho', type=float, default=DEFAULT_RHO, help='Number density.')
    add('--dimensions', type=int, choices=[2, 3], help='Number of dimensions.', default=DEFAULT_DIMENSIONS)
    add('--warmup_steps', type=int, default=DEFAULT_WARMUP_STEPS, help='Number of timesteps to run before timing.')
    add('--benchmark_steps', type=int, default=DEFAULT_BENCHMARK_STEPS, help='Number of timesteps to run in the benchmark.')
    add('--repeat', type=int, default=DEFAULT_REPEAT, help='Number of times to repeat the run.')
    add('-v', '--verbose', action='store_true', help='Verbose output.')
    return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_argument_parser():\n parser = Benchmark.make_argument_parser()\n parser.add_argument('--skip-reference',\n action='store_true',\n help='Skip the reference simulation run.')\n return parser", "def arg_parser(cls):\n parser ...
[ "0.7983817", "0.7168031", "0.70897955", "0.70692337", "0.7047852", "0.6983982", "0.693783", "0.6929585", "0.6902446", "0.689302", "0.6882429", "0.6879765", "0.68780637", "0.68780637", "0.6868575", "0.6855423", "0.68533826", "0.6851654", "0.68308103", "0.6825723", "0.678807", ...
0.7317018
1
Implement the command line entrypoint for benchmarks.
def main(cls):
    """Implement the command line entrypoint for benchmarks.

    Parses arguments, builds the device, runs the benchmark, and prints
    the mean performance from the root (rank 0) process only.
    """
    args = cls.make_argument_parser().parse_args()
    args.device = make_hoomd_device(args)
    performance = cls(**vars(args)).execute()
    # Only rank 0 reports, so parallel runs print a single result.
    if args.device.communicator.rank == 0:
        print(f'{numpy.mean(performance)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n logging.basicConfig(level=\"INFO\")\n assert len(sys.argv) == 2, \"Exactly one positional argument (path to the raw dataset) is \"\\\n \"needed. \\n\\nE.g. `python sparsity_benchmark ~/bff_data/final_table`\"\n\n # Prepares data for the benchmark, may take a while\n data_parameters...
[ "0.7575131", "0.736125", "0.7116626", "0.69657815", "0.6858612", "0.677702", "0.6768471", "0.6749162", "0.673816", "0.67165655", "0.6619266", "0.66101116", "0.65930367", "0.6569376", "0.65495706", "0.65495706", "0.6527374", "0.6446763", "0.6446198", "0.63803476", "0.632811", ...
0.6749548
7
Call make_simulations and return the first simulation.
def make_simulation(self):
    """Call ``make_simulations`` and return the first (reference) simulation.

    Also sets the reported units: raw timesteps per second when the
    reference run is skipped, otherwise comparative calls per second.
    """
    self.units = 'time steps per second' if self.skip_reference else 'calls per second'
    self.reference_sim, self.compare_sim = self.make_simulations()
    return self.reference_sim
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_simulations(self):\n pass", "def get_simulation(self, _id):\n\n simulation = self.collection.find_one({'_id': ObjectId(_id)})\n\n return simulation", "def fixture_sim():\n\tEXAMPLE_FILE_FOLDER = str(MODULE_DIR) + \"/data/nail1/\"\n\tsim = read.load_sim(EXAMPLE_FILE_FOLDER)\n\tretu...
[ "0.6396689", "0.63829195", "0.6235438", "0.6127556", "0.6113671", "0.6075556", "0.592617", "0.5924301", "0.5908774", "0.5887714", "0.5857056", "0.58460796", "0.58178735", "0.5798798", "0.57717663", "0.57685995", "0.5756951", "0.5755669", "0.5740458", "0.57304794", "0.57215303...
0.6316884
2
Run the benchmark for the given number of steps.
def run(self, steps): if not self.skip_reference: self.reference_sim.run(steps) self.compare_sim.run(steps)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, steps = 1000):\n for step in range(steps):\n if self.is_done():\n return\n self.step()", "def run(self, steps=1000):\n for step in range(steps):\n if self.is_done():\n return\n self.step()", "def run(self, ste...
[ "0.6976785", "0.6855227", "0.6855227", "0.6431256", "0.631405", "0.6156152", "0.60947746", "0.60345435", "0.5957139", "0.5937996", "0.59057015", "0.5897424", "0.58912295", "0.5865059", "0.582705", "0.58178043", "0.58033776", "0.5785402", "0.57739943", "0.5768402", "0.573326",...
0.58645546
14
Override this method to initialize the simulations.
def make_simulations(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialise_sim(self):\n pass", "def initialize_simulation(self) -> Simulation:\n pass", "def _setup_simulation(self\n ) -> None:\n pass", "def __init__(self, simulator):\r\n self.initialize(simulator)", "def make_simulation(self):\n pass", "...
[ "0.8679043", "0.8295332", "0.75881743", "0.7474434", "0.72223103", "0.7014883", "0.70072925", "0.69853646", "0.6959237", "0.69506794", "0.6913647", "0.687302", "0.6830056", "0.6807677", "0.6807346", "0.6783988", "0.67358565", "0.6696126", "0.66808563", "0.6670654", "0.6643305...
0.7676313
2
Get the benchmark performance.
def get_performance(self): if self.skip_reference: return self.compare_sim.tps # Avoid divide by zero errors when the simulation is not executed. if self.reference_sim.tps == 0: return 0 t0 = 1 / self.reference_sim.tps t1 = 1 / self.compare_sim.tps return 1 / (t1 - t0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())", "def get_performance(self):\n return self.sim.tps", "def get_speedtest():\n\n if(DEBUG):\n print(\"Gathering speedtest results...\", flush=True)\n\n s = Speedtest()\n s....
[ "0.7716694", "0.6812372", "0.66633004", "0.64185244", "0.640215", "0.6386745", "0.6361457", "0.6321419", "0.6289639", "0.6282566", "0.6282566", "0.6264686", "0.6244597", "0.6237336", "0.62242454", "0.621317", "0.62114346", "0.62091786", "0.6123424", "0.61178464", "0.61099523"...
0.61632097
18
Make an ArgumentParser instance for comparative benchmark options.
def make_argument_parser(): parser = Benchmark.make_argument_parser() parser.add_argument('--skip-reference', action='store_true', help='Skip the reference simulation run.') return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--device',\n type=str,\n choices=['CPU', 'GPU'],\n help='Execution device.',\n required=True)\n pa...
[ "0.72827846", "0.7190032", "0.7093924", "0.7032623", "0.70229095", "0.69860554", "0.6946884", "0.694275", "0.693303", "0.6930106", "0.69170386", "0.68352914", "0.6832233", "0.68094486", "0.68094486", "0.6783497", "0.6778075", "0.6772473", "0.675373", "0.6753382", "0.6745362",...
0.788973
0
Max secondary depth based on modelshift secondary test from Jeff Coughlin
def modelshift_weaksec(koi): num = KOIDATA.ix[ku.koiname(koi), 'koi_tce_plnt_num'] if np.isnan(num): num = 1 kid = KOIDATA.ix[ku.koiname(koi), 'kepid'] tce = '{:09.0f}-{:02.0f}'.format(kid,num) #return largest depth between DV detrending and alternate detrending try: r = ROBOVETDATA.ix[tce] except KeyError: raise NoWeakSecondaryError(koi) depth_dv = r['mod_depth_sec_dv'] * (1 + 3*r['mod_fred_dv'] / r['mod_sig_sec_dv']) depth_alt = r['mod_depth_sec_alt'] * (1 + 3*r['mod_fred_alt'] / r['mod_sig_sec_alt']) logging.debug(r[['mod_depth_sec_dv','mod_fred_dv','mod_sig_sec_dv']]) logging.debug(r[['mod_depth_sec_alt','mod_fred_alt','mod_sig_sec_alt']]) if np.isnan(depth_dv) and np.isnan(depth_alt): #return weaksec_vv2(koi) raise NoWeakSecondaryError(koi) elif np.isnan(depth_dv): return depth_alt elif np.isnan(depth_alt): return depth_dv else: return max(depth_dv, depth_alt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_max_depth_val():\n data = SUNRGBDTrainDataset(True)\n return max([data[0][i][-1].flatten().item() for i in range(len(data))])", "def estimate_maxm_neutral_axis_depth(self):\r\n\t\txu_d = abs(self.concrete.max_compresive_strain)/\\\r\n\t\t\t(abs(self.concrete.max_compresive_strain) + self.steel.max_...
[ "0.6801275", "0.6496621", "0.6444544", "0.62129825", "0.6179012", "0.60663015", "0.59735364", "0.5784729", "0.5781161", "0.5756161", "0.5747669", "0.573456", "0.5719743", "0.56770504", "0.5676199", "0.56210965", "0.5609796", "0.5604197", "0.55953497", "0.55722934", "0.5571747...
0.59691375
7
Applies default secthresh & exclusion radius constraints
def apply_default_constraints(self): try: self.apply_secthresh(pipeline_weaksec(self.koi)) except NoWeakSecondaryError: logging.warning('No secondary eclipse threshold set for {}'.format(self.koi)) self.set_maxrad(default_r_exclusion(self.koi))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def constraints(self):\n ...", "def _discretize(self, constraints_object):\n pass", "def objects_radius(self, centre, radius):", "def apply_constraint(self):\n\t\tself.angle = self.constraint(self.angle) % 360", "def cutout(self, centre, radius):", "def constraints(self, x):\n pass",...
[ "0.57216144", "0.5593304", "0.5487926", "0.5459716", "0.5433389", "0.5411653", "0.5399157", "0.529492", "0.5289383", "0.528124", "0.52704936", "0.52636516", "0.5226623", "0.52207905", "0.52052534", "0.51941854", "0.51933926", "0.5185518", "0.51653844", "0.51490223", "0.514120...
0.6335594
0
Returns true if provenance of property is SPE or AST
def use_property(kepid, prop): try: prov = kicu.DATA.ix[kepid, '{}_prov'.format(prop)] return any([prov.startswith(s) for s in ['SPE', 'AST']]) except KeyError: raise MissingStellarError('{} not in stellar table?'.format(kepid))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isprop(v):\n return isinstance(v, property)", "def isproperty(object):\n return isinstance(object, property)", "def is_simple(self):\n return self.propertyValueType.lower() in ('float', 'double',\n 'int', 'integer',\n ...
[ "0.6570999", "0.6159433", "0.562184", "0.5580678", "0.55424774", "0.54955554", "0.54840237", "0.5463147", "0.5448263", "0.53668535", "0.5354428", "0.53308886", "0.5306961", "0.53069216", "0.5291205", "0.52911955", "0.52770704", "0.52570504", "0.5237203", "0.5223409", "0.51899...
0.6637285
0
returns star config object for given KOI
def star_config(koi, bands=['g','r','i','z','J','H','K'], unc=dict(g=0.05, r=0.05, i=0.05, z=0.05, J=0.02, H=0.02, K=0.02), **kwargs): folder = os.path.join(KOI_FPPDIR, ku.koiname(koi)) if not os.path.exists(folder): os.makedirs(folder) config = ConfigObj(os.path.join(folder,'star.ini')) koi = ku.koiname(koi) maxAV = koi_maxAV(koi) config['maxAV'] = maxAV mags = ku.KICmags(koi) for band in bands: if not np.isnan(mags[band]): config[band] = (mags[band], unc[band]) config['Kepler'] = mags['Kepler'] kepid = KOIDATA.ix[koi,'kepid'] if use_property(kepid, 'teff'): teff, e_teff = (kicu.DATA.ix[kepid, 'teff'], kicu.DATA.ix[kepid, 'teff_err1']) if not any(np.isnan([teff, e_teff])): config['Teff'] = (teff, e_teff) if use_property(kepid, 'logg'): logg, e_logg = (kicu.DATA.ix[kepid, 'logg'], kicu.DATA.ix[kepid, 'logg_err1']) if not any(np.isnan([logg, e_logg])): config['logg'] = (logg, e_logg) if use_property(kepid, 'feh'): feh, e_feh = (kicu.DATA.ix[kepid, 'feh'], kicu.DATA.ix[kepid, 'feh_err1']) if not any(np.isnan([feh, e_feh])): config['feh'] = (feh, e_feh) for kw,val in kwargs.items(): config[kw] = val return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fpp_config(koi, **kwargs):\n folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))\n if not os.path.exists(folder):\n os.makedirs(folder)\n config = ConfigObj(os.path.join(folder,'fpp.ini'))\n\n koi = ku.koiname(koi)\n\n rowefit = jrowe_fit(koi)\n\n config['name'] = koi\n ra,dec = ku.r...
[ "0.60090053", "0.5942401", "0.5920423", "0.5912436", "0.5829198", "0.55174524", "0.55119944", "0.54936326", "0.5490124", "0.5460084", "0.54068804", "0.5390276", "0.5330812", "0.5309186", "0.53012615", "0.52819467", "0.52716434", "0.526362", "0.52570665", "0.52288973", "0.5213...
0.6899527
0
returns config object for given KOI
def fpp_config(koi, **kwargs): folder = os.path.join(KOI_FPPDIR, ku.koiname(koi)) if not os.path.exists(folder): os.makedirs(folder) config = ConfigObj(os.path.join(folder,'fpp.ini')) koi = ku.koiname(koi) rowefit = jrowe_fit(koi) config['name'] = koi ra,dec = ku.radec(koi) config['ra'] = ra config['dec'] = dec config['rprs'] = rowefit.ix['RD1','val'] config['period'] = rowefit.ix['PE1', 'val'] config['starfield'] = kepler_starfield_file(koi) for kw,val in kwargs.items(): config[kw] = val config['constraints'] = {} config['constraints']['maxrad'] = default_r_exclusion(koi) try: config['constraints']['secthresh'] = pipeline_weaksec(koi) except NoWeakSecondaryError: pass return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_confg(self):\n\n ini = ConfigParser()\n self.config_parser = ini\n # if isinstance(cfile, (file, StringIO.StringIO, io.BytesIO)):\n if isinstance(self.config_data, str) and self.config_data:\n fp = io.BytesIO(self.config_data)\n ini.readfp(fp)\n elif...
[ "0.64626646", "0.6120666", "0.6065999", "0.6041188", "0.6032176", "0.599006", "0.5987609", "0.5959099", "0.59461635", "0.59364283", "0.5936055", "0.59172744", "0.59112185", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476", "0.5904476",...
0.6510853
0
Predict a single batch of images, optionally with augmentation. Augmentations vectorized across the entire batch and predictions averaged.
def predict_batch(self, imgs_batch, augment=False): if augment: aug_funcs = [ lambda x: x, # identity lambda x: x[:, ::-1, ...], # vlip lambda x: x[:, :, ::-1], # hflip lambda x: np.rot90(x, 1, axes=(1, 2)), # +90 lambda x: np.rot90(x, 2, axes=(1, 2)), # +180 lambda x: np.rot90(x, 3, axes=(1, 2)), # +270 lambda x: np.rot90(x, 1, axes=(1, 2))[:, ::-1, ...], # vflip(+90) lambda x: np.rot90(x, 1, axes=(1, 2))[:, :, ::-1] # vflip(+90) ] yp = np.zeros((imgs_batch.shape[0], len(TAGS))) for aug_func in aug_funcs: imgs_batch = aug_func(imgs_batch) tags_batch = self.net.predict(imgs_batch) yp += tags_batch / len(aug_funcs) return yp else: return self.net.predict_on_batch(imgs_batch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, images, batch_size):\n pass", "def warmup_predict(model, imgs, Npred):\n H = augmented_state_matrix(model[:-1], imgs, 0)\n h0 = H[-2]\n y0 = imgs[-1]\n return predict(model, y0, h0, Npred)", "def predict_on_batch(self, input_batch):\n from deeplift.util import run_functi...
[ "0.7239248", "0.7074474", "0.69302636", "0.68597704", "0.6841375", "0.6764959", "0.6649414", "0.6614922", "0.6554689", "0.6516499", "0.64637536", "0.64612246", "0.6373398", "0.63642263", "0.63606596", "0.63237506", "0.6289094", "0.62845325", "0.6273538", "0.62653995", "0.6237...
0.8082307
0
Unstack batch dimension and split into channels and alpha mask.
def unstack_and_split(self, x, batch_size, num_channels=3): unstacked = torch.reshape(x, [batch_size, -1] + list(x.shape)[1:]) channels, masks = torch.split(unstacked, [num_channels, 1], dim=2) return channels, masks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reshape_channels(x):\n assert x.dim() == 4\n batch_size, nc, h, w = x.size()\n x_t = x.view(batch_size, nc, -1).transpose(1, 2).contiguous()\n x_t = x_t.view(batch_size, h, w, nc)\n return x_t", "def batch_collate_fn(batch):\n images = []\n masks = []\n \n for ...
[ "0.59909207", "0.58123934", "0.5716505", "0.5716326", "0.5704696", "0.5700815", "0.55700964", "0.5564052", "0.55450964", "0.55180305", "0.551707", "0.54962254", "0.5416754", "0.5411667", "0.5395286", "0.5373396", "0.53642625", "0.5344805", "0.5337699", "0.5336697", "0.5332148...
0.7611791
0
Animals that can speak are correctly identified
def test_animals_can_speak(self): self.assertEqual(self.lion, 'roar') self.assertEqual(self.cat, 'meow')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def animals_by_species(self):\n print self.animal()", "def animal_eats(self):\n self.update_fodder()\n self.herbivore_eats()\n self.carnivore_eats()", "def substantiate():", "def speak(self):\n # Speaks randomly to another agent on the same cell\n anticipated_meaning...
[ "0.5804902", "0.5754538", "0.56921846", "0.56177425", "0.55703396", "0.5568995", "0.5478271", "0.5457741", "0.5398982", "0.53391945", "0.5301369", "0.5275877", "0.52202857", "0.51787615", "0.5157603", "0.51495886", "0.5125665", "0.512152", "0.5121497", "0.5117309", "0.5089538...
0.5837831
0
auto change 'date' onchange of 'x_start_date
def _compute_date_from_x_start_date(self): for ts_line in self: if ts_line.x_start_date: st_datetime = fields.Datetime.from_string( ts_line.x_start_date) # autocomplete date from start date st_date_tz = fields.Datetime.context_timestamp( self, st_datetime).date() ts_line.date = st_date_tz
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def onchange_start_date(self, start_date=False):\n if not start_date:\n return {}\n result = {'value': {'last_renovation_date': start_date}}\n return result", "def set_start_date(self, start_date):\n self.set_value_into_input_field(self.start_date_inputbox_locator, start_da...
[ "0.7639792", "0.7559864", "0.733876", "0.701777", "0.7012194", "0.6946314", "0.6936835", "0.68256146", "0.68256146", "0.68256146", "0.68256146", "0.68256146", "0.68256146", "0.68256146", "0.68256146", "0.68256146", "0.67880905", "0.6787431", "0.67594075", "0.67241603", "0.666...
0.7176086
3
auto calculate 'hours' onchange of 'x_start_date or x_end_date
def _compute_duration(self): diff_float = 0 for ts_line in self: if ts_line.x_start_date: st_datetime = fields.Datetime.from_string( ts_line.x_start_date) # autocomplete date from start date st_date_tz = fields.Datetime.context_timestamp( self, st_datetime).date() ts_line.date = st_date_tz # autocomplete name from start date st_datetime_tz = fields.Datetime.context_timestamp( self, st_datetime) string_st_dt_tz = fields.Datetime.to_string(st_datetime_tz) ts_line.name = ts_line.user_id.name + '/' + string_st_dt_tz en_datetime = fields.Datetime.from_string( ts_line.x_end_date) diff = en_datetime - st_datetime if(time(1, 00) <= st_datetime.time() <= time(5, 00)): if(time(6, 00) <= en_datetime.time() <= time(10, 00)): # del 1 hour for breaking lunch diff_float = round(diff.total_seconds() / 3600.0, 2)-1 else: diff_float = round(diff.total_seconds() / 3600.0, 2) ts_line.unit_amount = diff_float
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_number_of_hours(self):\n if self.date_to:\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n from_dt = datetime.strptime(self.date_from, DATETIME_FORMAT)\n to_dt = datetime.strptime(self.date_to, DATETIME_FORMAT)\n timedelta = to_dt - from_dt\n diff_d...
[ "0.598755", "0.5943536", "0.58988446", "0.5735438", "0.57317203", "0.5616081", "0.55189914", "0.54957277", "0.54912084", "0.538175", "0.5366651", "0.53358847", "0.53107077", "0.53098565", "0.5307729", "0.52965957", "0.5288884", "0.52459323", "0.5241022", "0.52161556", "0.5204...
0.5383492
9
auto calculate 'hours' onchange of 'is_overtime
def _compute_duration_overtime(self): diff_float = 0 for ts_line in self: if ts_line.x_start_date: st_datetime = fields.Datetime.from_string( ts_line.x_start_date) en_datetime = fields.Datetime.from_string( ts_line.x_end_date) diff = en_datetime - st_datetime if not ts_line.is_overtime: if(time(1, 00) <= st_datetime.time() <= time(5, 00)): if(time(6, 00) <= en_datetime.time() <= time(10, 00)): # del 1 hour for breaking lunch diff_float = round( diff.total_seconds() / 3600.0, 2)-1 else: diff_float = round(diff.total_seconds() / 3600.0, 2) ts_line.x_is_per_diem = False ts_line.unit_amount = diff_float
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hours(input=None):\n return get(input).hours", "def calculate_hours(time):\n return int(time / 3600)", "def interval_hours(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interval_hours\")", "def _get_number_of_hours(self):\n if self.date_to:\n DATETIME_FO...
[ "0.67112166", "0.6592598", "0.6250287", "0.61557996", "0.61208427", "0.594246", "0.59240556", "0.5915791", "0.59038764", "0.5896075", "0.5838429", "0.58366513", "0.5824198", "0.5824198", "0.5824198", "0.5824198", "0.5765874", "0.5749549", "0.57289326", "0.57184255", "0.569036...
0.55416137
31
Given a urlsafe version of an Album key, get the actual key
def get_album_key_by_keystr(keystr): attr_err = 'Keystrings must be an instance of base string, recieved: %s' % keystr kind_err = 'Expected urlsafe keystr for kind %s but received keystr for kind %s instead.' if not keystr or not isinstance(keystr, basestring): raise RuntimeError(attr_err) key = ndb.Key(urlsafe=keystr) if not key.kind() == PHOTOALBUM_KIND: raise RuntimeError(kind_err % (PHOTOALBUM_KIND, key.kind())) return key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_album_key(slug):\n err = 'Series slug must be defined and of of type basestring'\n\n if not slug or not isinstance(slug, basestring):\n raise RuntimeError(err)\n\n return ndb.Key(PHOTOALBUM_KIND, slug)", "def get_key_from_url(file_url):\t\n\tparts = urlparse(file_url)\n\tbucket_name = get...
[ "0.7271961", "0.64886624", "0.6453254", "0.62893945", "0.6282472", "0.62622005", "0.6248456", "0.62307674", "0.61798584", "0.6131501", "0.60555077", "0.60446566", "0.60090566", "0.59870636", "0.5964662", "0.5961178", "0.59544635", "0.5880653", "0.5861731", "0.58558726", "0.58...
0.76117
0
Create a ndb.Key given an Album slug
def get_album_key(slug): err = 'Series slug must be defined and of of type basestring' if not slug or not isinstance(slug, basestring): raise RuntimeError(err) return ndb.Key(PHOTOALBUM_KIND, slug)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_key(cls, song_id):\n return ndb.Key(cls, song_id)", "def get_album_key_by_keystr(keystr):\n attr_err = 'Keystrings must be an instance of base string, recieved: %s' % keystr\n kind_err = 'Expected urlsafe keystr for kind %s but received keystr for kind %s instead.'\n if not keystr or no...
[ "0.6371879", "0.6166366", "0.5936699", "0.58009845", "0.5732777", "0.55733705", "0.55266577", "0.5525588", "0.5469047", "0.53936213", "0.538625", "0.53846484", "0.5360333", "0.5352547", "0.534155", "0.5337662", "0.5337662", "0.53374225", "0.5337322", "0.53343934", "0.533107",...
0.797927
0
Given an album slug, fetch the album entity
def get_album_by_slug(slug): album_key = get_album_key(slug) album = album_key.get() return album
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_album(album_id):\n return query_single(album_id, Album, album_schema)", "def album(self, q, page=None):\r\n return self.get('album', q, page)", "def album(self, uri, detail=None):\r\n extras = self.ALBUM_DETAIL.get(detail)\r\n return self.get(uri, extras)", "def get_album(self...
[ "0.75652915", "0.70452404", "0.67959744", "0.65954405", "0.65209013", "0.64783317", "0.6462776", "0.6453332", "0.643563", "0.64184994", "0.62658083", "0.6256562", "0.6231396", "0.6121484", "0.6022898", "0.60085446", "0.59636325", "0.5959914", "0.59453434", "0.592426", "0.5915...
0.80606824
0
Fetch a list of Albums
def get_album_list(): # TODO: Paginate this, etc entities = PhotoAlbum.query().order(-PhotoAlbum.title).fetch(1000) return entities
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_albums():\n return query_multiple(request.args, album_search, \\\n album_filter, Album, albums_schema)", "def get_albums(self):\n artist = self.get_request_arg(\"artist\")\n if artist:\n lib = self.ctrl.library\n lst = sorted(self.ctrl.library.get_alb...
[ "0.8165235", "0.77157474", "0.7463277", "0.7329539", "0.7285775", "0.7235554", "0.71896297", "0.7168777", "0.71157223", "0.7096026", "0.7090436", "0.70471865", "0.70387936", "0.6880528", "0.68339896", "0.6679008", "0.66074675", "0.6587003", "0.65148", "0.6513741", "0.64734447...
0.81510186
1
Attempts a Redis DB connection and returns the DB Object
def dbConnect(self): r = redis.StrictRedis() try: r = redis.from_url(os.environ.get("REDIS_URL")) print("DB Connection seems okay!") except Exception as error: print ("Oops! An exception has occured:", error) print ("Exception TYPE:", type(error)) r = None finally: return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect_to_db(self):\n r = redis.Redis(host=self.hostname,\n port=self.portnumber,\n password=self.password)\n try:\n r.ping()\n except redis.ConnectionError:\n sys.exit('ConnectionError: is the redis-server running?')\n ...
[ "0.77405405", "0.77253574", "0.7679741", "0.7658954", "0.7619239", "0.74647945", "0.7445185", "0.74369836", "0.74027646", "0.73723024", "0.73014516", "0.723885", "0.7207661", "0.7056827", "0.70409024", "0.7014019", "0.6989931", "0.6985283", "0.69585145", "0.69442034", "0.6850...
0.8348194
0
Converts short URL to an ID
def shortURLToId(self, shortURL): id = 0 for i in shortURL: val_i = ord(i) if(val_i >= ord('a') and val_i <= ord('z')): id = id*62 + val_i - ord('a') elif(val_i >= ord('A') and val_i <= ord('Z')): id = id*62 + val_i - ord('Z') + 26 else: id = id*62 + val_i - ord('0') + 52 return id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _shortenUrl(self, url):\n posturi = \"https://www.googleapis.com/urlshortener/v1/url\"\n headers = {'Content-Type' : 'application/json'}\n data = {'longUrl' : url}\n data = json.dumps(data)\n request = urllib2.Request(posturi,data,headers)\n response = urllib2.urlopen(...
[ "0.76266074", "0.75862736", "0.71807855", "0.71714944", "0.7136487", "0.7108624", "0.70438015", "0.7000727", "0.6966817", "0.69377244", "0.6835616", "0.6831397", "0.6816086", "0.67460054", "0.668461", "0.6674245", "0.6609759", "0.66091233", "0.65995246", "0.65338206", "0.6514...
0.86196995
0
Converts ID to a short URL
def encodeUrl(self, id): characters = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" # base = 62 base = len(characters) ret = [] while id > 0: val = id % base ret.append(characters[val]) id = id // base # reverse and return return "".join(ret[::-1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def short_url(lastid):\n number = lastid +100000000000\n bs62encoded = base62.encode(number)\n return 'https://abc.com/{id}'.format(id=str(bs62encoded))", "def encode(shorturl_id: int) -> str:\n short_resource = []\n while shorturl_id > 0:\n character_index = shorturl_id % BASE\...
[ "0.80629903", "0.774184", "0.7362125", "0.7206839", "0.71768415", "0.7142918", "0.70756847", "0.70104337", "0.6962931", "0.69154733", "0.68603855", "0.67419654", "0.66957", "0.6651168", "0.6630139", "0.6607956", "0.6597487", "0.6586758", "0.6553414", "0.6496325", "0.64962417"...
0.6996009
8
Returns original and encoded/shortened url as output
def processUrl(self, original_url): red = self.dbConnect() original_url=str(original_url) print("ORIGINAL URL: " + original_url) # check set to see if it is an existing url if red.sismember('URL_SET', original_url): print("Same URL mapping already exists, let's find that...") # return the existing url for key in red.scan_iter(): if key.decode('utf-8') not in ['URL_SET', 'counter_value']: print("Checking Key: " + str(key)) curr_val = red.get(key).decode('UTF-8') print("Checking Value: " + str(curr_val)) if curr_val == original_url: print("Found Mapping: " + str(key) + " => " + str(curr_val) ) return key.decode('UTF-8'), red.ttl(key) print("No Mapping found, something is wrong...") print("Possibly a manual deletion") print("Adding the URL mapping again...") # if not found or if it is a new url - do the following # add to cache, update counter print("Adding the new URL to redis cache...") counter_seq = self.getAndUpdateCounter() encoded_url = self.encodeUrl(counter_seq) print("ENCODED URL: " + str(encoded_url)) print("NEW COUNTER VALUE: " + str(counter_seq)) red.set(encoded_url, original_url) # adding an expiry expiry_time = timedelta(days=days_to_live) print("Setting an expiry of " + str(expiry_time.days) + " days for the URL.") red.expire(encoded_url, expiry_time) # add this to a global set for quick lookup red.sadd('URL_SET', original_url) return encoded_url, red.ttl(encoded_url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(self, longUrl):\n if longUrl not in self.long_to_short:\n short = self.get_short(longUrl)\n self.short_to_long[short] = longUrl\n self.long_to_short[longUrl] = short\n return 'http://tinyurl.com/' + short", "def encode(self, longUrl):\n shortUr...
[ "0.7102593", "0.70894444", "0.7065672", "0.7008331", "0.6796581", "0.67095864", "0.66524667", "0.66465366", "0.6601956", "0.65962195", "0.6586476", "0.6553012", "0.654763", "0.6544104", "0.6534512", "0.6532423", "0.650703", "0.64639336", "0.646128", "0.64406013", "0.64365625"...
0.0
-1
Returns original and shortened url as output Invoked to redirect
def redirectUrl(self, encoded_url): red = self.dbConnect() if red.exists(encoded_url): print("This looks like a valid short URL") return str(red.get(encoded_url).decode('UTF-8')) else: print("This is not a valid short URL") return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect(url):", "def link_redirect(request, shortened_url: str):\n try:\n url = Url.objects.get(short_url=shortened_url)\n long_url = url.long_url\n return HttpResponseRedirect(long_url)\n except Url.DoesNotExist or TypeError:\n return HttpResponseBadRequest(\"Wrong url\")"...
[ "0.7246121", "0.6872797", "0.6835433", "0.65044236", "0.64537114", "0.64230764", "0.6415941", "0.6406983", "0.6309532", "0.6266381", "0.62487316", "0.6248407", "0.62424415", "0.62124103", "0.61925703", "0.6182394", "0.61781794", "0.6178157", "0.6178157", "0.6160361", "0.61357...
0.6364458
8
Returns the counter and increments by 1
def getAndUpdateCounter(self): red = self.dbConnect() curr_counter=0 if 'counter_value' in red: curr_counter = int(red.get('counter_value').decode('UTF-8')) print("incrementing counter...") print("older value: " + str(curr_counter)) red.set('counter_value', curr_counter + 1) else: # just an arbitrary value red.set('counter_value', 14433) return curr_counter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def increment_counter(self) -> None:", "def counter(self) -> int:", "def counter(self) -> int:", "def inc( self ):\n self.count += 1", "def inc(self):\n \n self.count += 1", "def increase_counter(self):\n self.values = self.values + 1", "def add_count(self):\n self.co...
[ "0.88919264", "0.8598626", "0.8598626", "0.82559425", "0.81963813", "0.7784121", "0.7672979", "0.7593635", "0.7538767", "0.75163466", "0.74550295", "0.7404312", "0.73930556", "0.7383696", "0.7365757", "0.73226464", "0.7267104", "0.72100943", "0.71966606", "0.71851814", "0.715...
0.6944701
35
Lists all keys in redis db cache
def listAll(self): red = self.dbConnect() return red.keys()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all():\n # results = [String.from_dict(redis.hgetall(key)) for key in redis.keys() if key != 'index']\n results = []\n for key in redis_store.keys(String.generate_key('*')):\n data = pickle.loads(redis_store.get(key))\n string = String(data['key']).deserialize(data)\n...
[ "0.7487752", "0.7252233", "0.7232625", "0.7188551", "0.7036857", "0.70209503", "0.69742924", "0.6873124", "0.68472815", "0.6803024", "0.67941684", "0.6720403", "0.6652056", "0.6624582", "0.6621683", "0.660927", "0.65792507", "0.6553939", "0.65026474", "0.64909124", "0.6487889...
0.7005117
6
Setup the optimizer and the learning rate scheduler. We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer's init, or override this method in a subclass.
def get_optimizers( self, num_training_steps ): if self.optimizers is not None: return self.optimizers # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": self.args.weight_decay, }, { "params": [ p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.0, }, ] if self.args.optimizer == "sgd": optimizer = SGD(optimizer_grouped_parameters, lr=self.args.learning_rate, momentum=self.args.sgd_momentum, \ weight_decay=self.args.weight_decay) elif self.args.optimizer == "adam": optimizer = AdamW( optimizer_grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon) if self.args.lr_schedule == "constant": scheduler = get_constant_schedule_with_warmup( optimizer, num_warmup_steps=self.args.warmup_steps) elif self.args.lr_schedule == "linear": scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps) elif self.args.lr_schedule == "invsqrt": scheduler = get_invsqrt_schedule_with_warmup( optimizer, num_warmup_steps=self.args.warmup_steps) return optimizer, scheduler
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_optimizers(self):\n allowed = list(OPTIM_LOOKUP.keys())\n if self.optimizer not in allowed:\n raise ValueError(\n f\"Illegal optimizer given. Got {self.optimizer}. Allowed: {allowed}.\"\n )\n\n allowed = list(SCHED_LOOKUP.keys())\n if s...
[ "0.7660184", "0.7521174", "0.7476974", "0.7435311", "0.7368797", "0.72984624", "0.72553366", "0.7180905", "0.715223", "0.7137306", "0.7121896", "0.71121275", "0.70854396", "0.7073759", "0.70551753", "0.700759", "0.70071536", "0.69621265", "0.69621265", "0.69529146", "0.691341...
0.6385241
79
Close the session and the Thrift transport.
def close(self): # PEP 249 rpc.close_service(self.service)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self):\n self.__session.close()", "def close(self):\n self.__session.close()", "def close(self):\n self.__session.close()", "def close(self):\n self.__session.close()", "def close(self):\n self.__session.close()", "def close(self):\n self.__session.clos...
[ "0.78195184", "0.78195184", "0.78195184", "0.78195184", "0.78195184", "0.78195184", "0.7808701", "0.7711172", "0.76763135", "0.7660848", "0.7508555", "0.7420375", "0.7418072", "0.7413811", "0.7404994", "0.732449", "0.7322094", "0.722286", "0.7220323", "0.71922815", "0.712453"...
0.0
-1
Impala doesn't support transactions; does nothing.
def commit(self): # PEP 249 pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def supports_transactions(self):\n return self._mysql_storage_engine != \"MyISAM\"", "def runSqlNoTransaction(self, sql):\r\n self.c.autocommit = True\r\n cursor = self.c.cursor()\r\n cursor.execute(sql)\r\n self.c.commit()\r\n cursor.close()\r\n self.c.autocommit...
[ "0.6690196", "0.6536858", "0.65303385", "0.65088254", "0.63948905", "0.6361204", "0.63229173", "0.6194685", "0.6192952", "0.61433756", "0.61367714", "0.61094934", "0.60980386", "0.6026481", "0.6020292", "0.6011731", "0.59969383", "0.5978559", "0.5967903", "0.5959607", "0.5950...
0.0
-1
Impala doesn't support transactions; raises NotSupportedError
def rollback(self): # PEP 249 raise impala.error.NotSupportedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def supports_transactions(self):\n return self._mysql_storage_engine != \"MyISAM\"", "def supports_transactions(self):\n return False", "def with_transaction(session, f):\n try:\n f(session)\n session.commit()\n except Exception as e:\n session.rollback()\n raise...
[ "0.66796213", "0.63022", "0.5864729", "0.5848233", "0.58128023", "0.57294697", "0.5692588", "0.56807727", "0.5673939", "0.56446296", "0.5611186", "0.55624133", "0.55621827", "0.5557179", "0.5543098", "0.55403894", "0.54963696", "0.54826725", "0.5479581", "0.54793453", "0.5419...
0.59182566
2
Checks connection to server by requesting some info from the server.
def ping(self): return rpc.ping(self.service, self.session_handle)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_connection(self):\n for _ in range(3):\n try:\n r = get(f\"http://{self.ip}/student/{self.user}\")\n if r.ok:\n break \n except OSError as e:\n print(f\"Connection error:\\n{e}\")\n sleep(2)\n ...
[ "0.74695706", "0.7439458", "0.7408965", "0.7146001", "0.7071619", "0.6893049", "0.67745674", "0.6694355", "0.6692175", "0.66580856", "0.6639965", "0.6628134", "0.66238683", "0.6612052", "0.6602828", "0.6601591", "0.65890414", "0.65394765", "0.65368456", "0.6516861", "0.651257...
0.0
-1
probably the wrost way to parse this captcha
def get_captcha_reply(captcha): def get_char_at(pos, captcha): char_chars = [line[pos-1:pos] for line in captcha.split(b'\n')] key = ''.join([ str(s, 'ascii') for s in char_chars]) if key == ' | ': return get_char_at(pos+2, captcha) if key == ' | .\\ ': return get_char_at(pos+2, captcha) return chars[key] pos = 1 a, size = get_char_at(pos, captcha) pos += size pwn.log.info("a=%d" % a) op, size = get_char_at(pos, captcha) pos += size pwn.log.info('op=%s' % op) b, size = get_char_at(pos, captcha) pos += size pwn.log.info('b=%d' % b) if op == '-': return a - b if op == '*': return a * b if op == '/': return a / b if op == '+': return a + b pwn.log.error("Ops not found (%s)" % op)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_captcha(self):\n res = self._limited_call(self._requests.get,\n constants.FA_ROOT + \"/captcha.jpg\")\n data = res.content\n return data", "def handle_captcha(self):\n self.webdriver.save_screenshot('./out/captcha.png')\n sleep(20)\n\n ...
[ "0.62265563", "0.6219252", "0.61916035", "0.6096624", "0.60874146", "0.59667766", "0.59021354", "0.58705884", "0.5825633", "0.5809599", "0.57755667", "0.5746426", "0.5594208", "0.5538595", "0.55344874", "0.55010104", "0.5449904", "0.53967845", "0.53909343", "0.5288578", "0.52...
0.67830896
0
The set of arguments for constructing a Environment resource.
def __init__(__self__, *, application_name: pulumi.Input[str], cname_prefix: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, environment_name: Optional[pulumi.Input[str]] = None, operations_role: Optional[pulumi.Input[str]] = None, option_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentOptionSettingArgs']]]] = None, platform_arn: Optional[pulumi.Input[str]] = None, solution_stack_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentTagArgs']]]] = None, template_name: Optional[pulumi.Input[str]] = None, tier: Optional[pulumi.Input['EnvironmentTierArgs']] = None, version_label: Optional[pulumi.Input[str]] = None): pulumi.set(__self__, "application_name", application_name) if cname_prefix is not None: pulumi.set(__self__, "cname_prefix", cname_prefix) if description is not None: pulumi.set(__self__, "description", description) if environment_name is not None: pulumi.set(__self__, "environment_name", environment_name) if operations_role is not None: pulumi.set(__self__, "operations_role", operations_role) if option_settings is not None: pulumi.set(__self__, "option_settings", option_settings) if platform_arn is not None: pulumi.set(__self__, "platform_arn", platform_arn) if solution_stack_name is not None: pulumi.set(__self__, "solution_stack_name", solution_stack_name) if tags is not None: pulumi.set(__self__, "tags", tags) if template_name is not None: pulumi.set(__self__, "template_name", template_name) if tier is not None: pulumi.set(__self__, "tier", tier) if version_label is not None: pulumi.set(__self__, "version_label", version_label)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cookiecutter_args(self) -> dict[str, str]:\n local_args = {\n \"add_golden\": \"y\" if self.golden_tests else \"n\",\n \"copyright_holder\": self.copyright_holder,\n \"copyright_year\": (\n self.today.strftime(\"%Y\")\n if not self.copyright...
[ "0.61305785", "0.61261475", "0.6043725", "0.6030461", "0.5991547", "0.59570146", "0.58903545", "0.58040184", "0.5788831", "0.5748878", "0.5733352", "0.5728621", "0.5659946", "0.5640539", "0.56395596", "0.5638727", "0.5636724", "0.5635059", "0.56332546", "0.56139416", "0.56015...
0.5898593
6
The name of the application that is associated with this environment.
def application_name(self) -> pulumi.Input[str]: return pulumi.get(self, "application_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def app_name(self) -> str:\n return self._app_name", "def app_name(self):\n return self._app_name", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"a...
[ "0.8759173", "0.8733051", "0.8620485", "0.8620485", "0.8601236", "0.8559315", "0.8510323", "0.8470934", "0.83924884", "0.8365387", "0.81169367", "0.80879086", "0.8058957", "0.80493975", "0.80454445", "0.80223936", "0.8003628", "0.79939234", "0.7801216", "0.7618629", "0.748491...
0.83143616
13
If specified, the environment attempts to use this value as the prefix for the CNAME in your Elastic Beanstalk environment URL. If not specified, the CNAME is generated automatically by appending a random alphanumeric string to the environment name.
def cname_prefix(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "cname_prefix")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_env_prefix(instrument):\n return \"crds://\"", "def create_r53_name ( base_name, name ) :\n env = get_env_type( base_name )\n if env :\n env = env.lower( )\n if ( env == 'prod' ) :\n return name\n\n return name + '.' + env", "def env_name(self):\n return f\"{self.project_nam...
[ "0.64996815", "0.5894681", "0.5874034", "0.5585658", "0.5544347", "0.54832244", "0.5323281", "0.5320591", "0.52773994", "0.52695954", "0.5262594", "0.5224099", "0.5203325", "0.5185465", "0.51792514", "0.51767623", "0.51628804", "0.5161422", "0.51356816", "0.5127793", "0.51247...
0.52725095
9
Your description for this environment.
def description(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "description")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def describe(self):\r\n print( self.name + \" is here!\" )\r\n print( self.description )", "def description(self):\n pass", "def description(self):\n pass", "def description():", "def description(self) -> str:\n pass", "def describe(self):\n print(self.descriptio...
[ "0.7659912", "0.7643785", "0.7643785", "0.75917745", "0.7568413", "0.75415725", "0.75415725", "0.7419379", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "0.73063093", "...
0.0
-1
A unique name for the environment.
def environment_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "environment_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self):\n return self._env_name", "def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)", "def env_name(self):\n return f\"{self.project_...
[ "0.79486114", "0.75685275", "0.7347741", "0.71612895", "0.7083821", "0.6932168", "0.6866546", "0.6833046", "0.6826362", "0.6653989", "0.66474885", "0.658179", "0.6504934", "0.6459993", "0.64504695", "0.64480555", "0.6429557", "0.6411307", "0.6345184", "0.63397086", "0.6339165...
0.69573826
5
The Amazon Resource Name (ARN) of an existing IAM role to be used as the environment's operations role.
def operations_role(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "operations_role")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def role_arn(self) -> str:\n return pulumi.get(self, \"role_arn\")", "def iam_role_arn(self) -> str:\n return pulumi.get(self, \"iam_role_arn\")", "def role_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"role_arn\")", "def role_arn(self) -> pulumi.Input[str]:\n ...
[ "0.7612605", "0.75487196", "0.7544673", "0.7489435", "0.74707127", "0.74707127", "0.7373255", "0.7373255", "0.734374", "0.734374", "0.734374", "0.734374", "0.72820526", "0.72820526", "0.7252721", "0.71177185", "0.70236653", "0.69610494", "0.6929272", "0.6908325", "0.69070154"...
0.6835086
21
Keyvalue pairs defining configuration options for this environment, such as the instance type.
def option_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentOptionSettingArgs']]]]: return pulumi.get(self, "option_settings")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def define_options(self):\n return {\n 'basename': OptionDef(required=True, default_value='keycloak', allowed_types=[str]),\n 'namespace': OptionDef(required=True, default_value='default', allowed_types=[str]),\n 'config': {\n 'service_port': OptionDef(require...
[ "0.6606235", "0.6575482", "0.65406615", "0.653029", "0.65184087", "0.6499363", "0.64822006", "0.6451657", "0.63914824", "0.63914824", "0.63914824", "0.6329847", "0.6306304", "0.6300337", "0.62472105", "0.6245023", "0.62443054", "0.6222554", "0.6206321", "0.6206157", "0.620615...
0.0
-1
The Amazon Resource Name (ARN) of the custom platform to use with the environment.
def platform_arn(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "platform_arn")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def platform_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"platform_arn\")", "def platform(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"platform\")", "def platform():\n return \"micaz\"", "def PlatformName():\n if override_platform_name:\n return over...
[ "0.7836344", "0.6630474", "0.66273826", "0.6609992", "0.6324445", "0.6285871", "0.623594", "0.61513615", "0.60816205", "0.60486674", "0.60041595", "0.598996", "0.59880674", "0.596188", "0.59613025", "0.5961109", "0.5934884", "0.5923574", "0.5923574", "0.59224755", "0.59224755...
0.77357316
1
The name of an Elastic Beanstalk solution stack (platform version) to use with the environment.
def solution_stack_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "solution_stack_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solution_stack_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"solution_stack_name\")", "def stackname(self):\n return self.BASE_NAME.format(**self.conf)", "def stack_name(self) -> str:\n return jsii.get(self, \"stackName\")", "def env_name(self):\n ret...
[ "0.6850209", "0.6835407", "0.6518939", "0.65049684", "0.64019746", "0.6248095", "0.6167335", "0.615564", "0.6142327", "0.6139697", "0.6102911", "0.6087402", "0.60867625", "0.60640186", "0.6010799", "0.59820807", "0.592273", "0.58995575", "0.5882993", "0.58522046", "0.58299625...
0.68761265
0
Specifies the tags applied to resources in the environment.
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvironmentTagArgs']]]]: return pulumi.get(self, "tags")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tags(self, tags):\n self._tags = tags", "def tags(self, tags):\n self._tags = tags", "def tags(self, tags):\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._tags = tags", "def tags(self, tags):\n\n self._ta...
[ "0.7253586", "0.7253586", "0.7253586", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.71411216", "0.70391154", "0.6818384", "0.6521481", "0.65186703", "0.6484757", "0.6386305", "0.63736135", "0....
0.63703835
21
The name of the Elastic Beanstalk configuration template to use with the environment.
def template_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "template_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_configuration_template(self):\n return CONFIG_TEMPLATE", "def template_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"template_name\")", "def inspect_template_name(self) -> str:\n return pulumi.get(self, \"inspect_template_name\")", "def template(self):\n ret...
[ "0.6748205", "0.65170735", "0.65021986", "0.64853156", "0.6403302", "0.6382873", "0.6355259", "0.6352765", "0.6313939", "0.6291501", "0.6274407", "0.6263015", "0.62534225", "0.61411935", "0.6131308", "0.6107288", "0.6077706", "0.6076934", "0.6012232", "0.6004269", "0.5972697"...
0.62606674
12
Specifies the tier to use in creating this environment. The environment tier that you choose determines whether Elastic Beanstalk provisions resources to support a web application that handles HTTP(S) requests or a web application that handles backgroundprocessing tasks.
def tier(self) -> Optional[pulumi.Input['EnvironmentTierArgs']]: return pulumi.get(self, "tier")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tier(self):\n\n if not hasattr(self, \"_tier\"):\n self._tier = self.opts.get(\"tier\")\n return self._tier", "def tier(self) -> pulumi.Output[Optional['outputs.EnvironmentTier']]:\n return pulumi.get(self, \"tier\")", "def set_tier(self, tier):\n self.single_selectio...
[ "0.69780785", "0.68621033", "0.65023184", "0.6452587", "0.6355082", "0.6355082", "0.6355082", "0.62773496", "0.62122846", "0.6167871", "0.6167871", "0.6167871", "0.6167871", "0.6113269", "0.6034399", "0.60080045", "0.598394", "0.5843236", "0.5833414", "0.5829682", "0.5780073"...
0.7524803
0
The name of the application version to deploy.
def version_label(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "version_label")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version_name(self) -> str:\n return pulumi.get(self, \"version_name\")", "def app_version(self) -> str:\n return pulumi.get(self, \"app_version\")", "def version_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"version_name\")", "def get_package_name(self):\n ...
[ "0.79632217", "0.7523307", "0.7298782", "0.7256948", "0.7225796", "0.7173701", "0.7093467", "0.70632595", "0.70144266", "0.691959", "0.6919559", "0.6915445", "0.6907794", "0.6907794", "0.68302757", "0.68228155", "0.67623806", "0.6752896", "0.6749308", "0.6721663", "0.67069286...
0.0
-1
Get an existing Environment resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'Environment': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = EnvironmentArgs.__new__(EnvironmentArgs) __props__.__dict__["application_name"] = None __props__.__dict__["cname_prefix"] = None __props__.__dict__["description"] = None __props__.__dict__["endpoint_url"] = None __props__.__dict__["environment_name"] = None __props__.__dict__["operations_role"] = None __props__.__dict__["option_settings"] = None __props__.__dict__["platform_arn"] = None __props__.__dict__["solution_stack_name"] = None __props__.__dict__["tags"] = None __props__.__dict__["template_name"] = None __props__.__dict__["tier"] = None __props__.__dict__["version_label"] = None return Environment(resource_name, opts=opts, __props__=__props__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Environment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = EnvironmentArgs.__new__(EnvironmentArgs)\n\n __props__.__dict...
[ "0.6518575", "0.61676663", "0.615199", "0.6109608", "0.6109441", "0.60900664", "0.6072669", "0.5931966", "0.58831435", "0.5837981", "0.5818557", "0.58057237", "0.5786659", "0.5786354", "0.5778798", "0.5712893", "0.56756985", "0.5640747", "0.5597875", "0.5522777", "0.54472834"...
0.6545722
0