| query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (list, 30 items) | negative_scores (list, 30 items) | document_score (string, 4–10 chars) | document_rank (2 classes) |
|---|---|---|---|---|---|---|
Update the balance, sell amount, and buy amount based on the return rate | def _balance_update(self):
return_rate = self.df.loc[self.currentStep, "return_Close"]
self.buy_amount += return_rate * self.buy_amount
self.sell_amount -= return_rate * self.sell_amount | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_earning(self, data=None):\n result = Result()\n if data is None:\n data = self.security\n self.calcDecision()\n first_purchase_method = self.check_first_purchase_method()\n for i in np.arange(len(data['Close'])):\n if data['FinalDecision'].iloc[i] i... | [
"0.6785312",
"0.6220181",
"0.6175861",
"0.6114552",
"0.6088199",
"0.60283774",
"0.6012482",
"0.59921443",
"0.5976123",
"0.5962097",
"0.59508103",
"0.5949215",
"0.5946042",
"0.5930534",
"0.5910977",
"0.59003735",
"0.5884636",
"0.58771354",
"0.5839308",
"0.58320624",
"0.5797567... | 0.72528595 | 0 |
Prepare the dict of values to create the new invoice for a sale order. This method may be overridden to implement custom invoice generation (making sure to call super() to establish a clean extension chain). | def _prepare_invoice(self, cr, uid, order, lines, context=None):
invoice_vals = super(my_sale_order, self)._prepare_invoice(cr, uid, order,
lines, context)
invoice_vals.update({
'partner_shipping_id': order.partner_shipping_id.... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _prepare_invoice(self):\n # get current logged in user's timezone\n local = pytz.timezone(self.env['res.users'].browse(self._uid).tz) or pytz.utc\n\n self.ensure_one()\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')], limit=1).id\n if not journal_... | [
"0.74100953",
"0.73465693",
"0.73403376",
"0.7328195",
"0.7002523",
"0.69652605",
"0.68501127",
"0.67724967",
"0.6712132",
"0.665417",
"0.6562301",
"0.6495313",
"0.6490809",
"0.6431741",
"0.6104436",
"0.6072918",
"0.59833795",
"0.59129834",
"0.5863059",
"0.5860928",
"0.578695... | 0.76020634 | 0 |
The function prepares the images for our model based on a given video | def prepare_video(path_to_video: str, number_of_images=87) -> None:
temp_video = path.join(path_to_video, 'temp_outpy.mp4')
video = path.join(path_to_video, 'outpy.h264')
# create mp4 video for metadata and compute video duration
subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])
re... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_train_video(opt, frame_path, Total_frames):\n clip = []\n i = 0\n loop = 0\n\n # choosing a random frame\n if Total_frames <= opt.sample_duration: \n loop = 1\n start_frame = 0\n else:\n start_frame = np.random.randint(0, Total_frames - opt.sample_duration)\n \n ... | [
"0.6378548",
"0.6341571",
"0.6268886",
"0.6267411",
"0.62665486",
"0.62576735",
"0.61747694",
"0.6135613",
"0.61193234",
"0.6107251",
"0.607215",
"0.6004102",
"0.59735906",
"0.5962933",
"0.5939074",
"0.59383684",
"0.59096986",
"0.5904158",
"0.58933836",
"0.5886941",
"0.588352... | 0.6345232 | 1 |
The function converts an Euler angle (yaw, pitch, roll) to a quaternion object | def euler_to_quaternion(euler: tuple) -> object:
(yaw, pitch, roll) = (euler[0], euler[1], euler[2])
qy = np.sin(roll / 2) * np.cos(pitch / 2) * np.cos(yaw / 2) - np.cos(roll / 2) * np.sin(pitch / 2) * np.sin(yaw / 2)
qx = np.cos(roll / 2) * np.sin(pitch / 2) * np.cos(yaw / 2) + np.sin(roll / 2) * np.cos(p... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def euler_to_quaternion(psi, theta, phi):\n # Abbreviations for the various angular functions\n cy = np.cos(psi * 0.5)\n sy = np.sin(psi * 0.5)\n cp = np.cos(theta * 0.5)\n sp = np.sin(theta * 0.5)\n cr = np.cos(phi * 0.5)\n sr = np.sin(phi * 0.5)\n\n q = np.zeros(4)\n q[0] = cy * cp * c... | [
"0.7751907",
"0.7692476",
"0.7672758",
"0.7591342",
"0.7487414",
"0.74774384",
"0.7452368",
"0.742516",
"0.74124753",
"0.7385367",
"0.7384482",
"0.73697144",
"0.7299849",
"0.72180927",
"0.7195682",
"0.71946454",
"0.7185801",
"0.71458083",
"0.7089422",
"0.70486486",
"0.7017161... | 0.805849 | 0 |
The function writes the recovered camera poses to file in the format described by the COLMAP documentation | def write_camera_pose_to_file(camera_pose_abs_dict: dict, pose_dir_path: str) -> None:
image_dst = path.join(pose_dir_path, 'images.txt')
with open(image_dst, 'w+') as file:
file.write('# Image list with two lines of data per image:\n')
file.write('# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMER... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_poses():\n get_marshmallow_pose(should_remember=True)\n get_mouth_pose(should_remember=True)\n rospy.sleep(1)\n print \"Finished saving poses\"",
"def write(self, pathname='wind.png'):\r\n cv2.imwrite(pathname, self.matrix * 255)",
"def writeCameraSettings(self):\n pass",
"... | [
"0.6014014",
"0.5977247",
"0.5700586",
"0.56356347",
"0.56256527",
"0.5615104",
"0.560124",
"0.55990064",
"0.55756223",
"0.5424815",
"0.54085475",
"0.5403678",
"0.5390949",
"0.5362901",
"0.53255045",
"0.53230655",
"0.5303748",
"0.5303022",
"0.5295456",
"0.52894944",
"0.528787... | 0.6023144 | 0 |
Draw an n x n grid with edges / nodes from X in red | def draw_grid(n,X):
G = nx.grid_2d_graph(n+1,n+1)
set_node_colors(G,G.nodes(),'k')
set_edge_colors(G,G.edges(),'k')
set_edge_weights(G,G.edges(),0.5)
set_node_colors(G,edge_subgraph_nodes(X),'r')
set_edge_colors(G,X,'r')
set_edge_weights(G,X,1)
nc = [G.node[n]['color'] for... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_grid(self):\n plt.imshow(py.array(\n map(lambda x: map(lambda y: mplc.colorConverter.to_rgb(colord[y]), x), self.create_grid(self.graph))),\n interpolation='nearest')\n plt.show()",
"def draw_grid(self):\n for i in range(N * N + 1):\n color = \"blue\... | [
"0.6630089",
"0.647417",
"0.6226133",
"0.6224112",
"0.6188585",
"0.6169128",
"0.61569834",
"0.6127831",
"0.61215925",
"0.6074973",
"0.6070254",
"0.606452",
"0.60638255",
"0.6051185",
"0.60291207",
"0.6009064",
"0.600086",
"0.5957386",
"0.59433025",
"0.59421194",
"0.59329593",... | 0.7901583 | 0 |
Only show the debug toolbar to users with the superuser flag. | def _custom_show_toolbar(request: 'HttpRequest') -> bool:
return DEBUG and request.user.is_superuser | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _custom_show_toolbar(request):\n return DEBUG and request.user.is_superuser",
"def show_toolbar(request: HttpRequest) -> bool:\n conditions = (\n settings.DEBUG\n and request.META.get('REMOTE_ADDR', None) in settings.INTERNAL_IPS,\n request.user.is_superuser,\n )\n disqualifi... | [
"0.817702",
"0.67734593",
"0.61041766",
"0.6046557",
"0.5890961",
"0.5890961",
"0.5883483",
"0.5869873",
"0.58142656",
"0.58089703",
"0.5765674",
"0.57116914",
"0.57055",
"0.56569725",
"0.5652845",
"0.5607011",
"0.5505614",
"0.5483127",
"0.5456996",
"0.5456996",
"0.5435421",
... | 0.8088223 | 1 |
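`_custom_show_toolbar` is the standard hook shape for django-debug-toolbar. Wiring it up happens in settings via `SHOW_TOOLBAR_CALLBACK` (a real setting name; the dotted path below is a placeholder for wherever the function actually lives):

```python
# settings.py
DEBUG_TOOLBAR_CONFIG = {
    "SHOW_TOOLBAR_CALLBACK": "myproject.settings._custom_show_toolbar",
}
```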
Formats the location values, separating keys, values, and k/v pairs >>> l = Location(42.1, 23.5, "test") | def format_geocommit(self, keyval_separator, entry_separator):
end = entry_separator
sep = keyval_separator
msg = "lat" + sep + str(self.lat) + end
msg += "long" + sep + str(self.long) + end
for attr in self.optional_keys:
if hasattr(self, attr):
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format_pair(self, k, v):\n if isinstance(v, int):\n data_width = len(str(v)) + 1\n header_width = len(str(k))\n w = max(data_width, header_width)\n h = ('%% %us'%w)%k\n return ' '*len(h), h, '%%%ud'%w\n elif k=='dt':\n fmt = '%6.3... | [
"0.59635484",
"0.5520702",
"0.5457944",
"0.54207",
"0.5350413",
"0.5347729",
"0.5347187",
"0.5327935",
"0.53164715",
"0.53077406",
"0.5262094",
"0.5207229",
"0.5207229",
"0.51810014",
"0.5175114",
"0.51656723",
"0.5141616",
"0.51369876",
"0.51341087",
"0.51255035",
"0.5115307... | 0.568076 | 1 |
Formats the location using the long geocommit format >>> l = Location(42.1, 23.5, "test") >>> l.format_long_geocommit() | def format_long_geocommit(self):
geocommit = "geocommit (1.0)\n"
geocommit += self.format_geocommit(": ", "\n")
geocommit += "\n\n"
return geocommit | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format_short_geocommit(self):\r\n geocommit = \"geocommit(1.0): \"\r\n geocommit += self.format_geocommit(\" \", \", \")\r\n geocommit += \";\"\r\n\r\n return geocommit",
"def format_latlon(lat: float, lon: float) -> str:\n if lat < 0:\n latdir = \"S\"\n e... | [
"0.6449494",
"0.5818637",
"0.57956123",
"0.57563007",
"0.5687247",
"0.55729204",
"0.55175114",
"0.5450161",
"0.5328889",
"0.53174734",
"0.5223536",
"0.52226293",
"0.51880616",
"0.5184553",
"0.51536745",
"0.51523906",
"0.5143899",
"0.513625",
"0.5133939",
"0.51274645",
"0.5106... | 0.7905342 | 0 |
Formats the location using the short geocommit format >>> l = Location(42.1, 23.5, "test") >>> l.format_short_geocommit() | def format_short_geocommit(self):
geocommit = "geocommit(1.0): "
geocommit += self.format_geocommit(" ", ", ")
geocommit += ";"
return geocommit | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format_long_geocommit(self):\r\n geocommit = \"geocommit (1.0)\\n\"\r\n geocommit += self.format_geocommit(\": \", \"\\n\")\r\n geocommit += \"\\n\\n\"\r\n\r\n return geocommit",
"def short_def(self):\r\n return f\"{self.lat}, {self.lon}\"",
"def from_short_format(data):\... | [
"0.78306407",
"0.64534444",
"0.62554103",
"0.61308855",
"0.60158163",
"0.5945876",
"0.5859429",
"0.57603073",
"0.5721214",
"0.55733645",
"0.55531913",
"0.55373216",
"0.55149174",
"0.54258066",
"0.54249513",
"0.54163617",
"0.53930986",
"0.5350871",
"0.5336369",
"0.5322799",
"0... | 0.7280461 | 1 |
Parses a string in short format to create an instance of the class. >>> l = Location.from_short_format( | def from_short_format(data):
m = re.search("geocommit\(1\.0\): ((?:[a-zA-Z0-9_-]+ [^,;]+, )*)([a-zA-Z0-9_-]+ [^,;]+);", data)
if m is None:
return None
values = m.group(1) + m.group(2)
data = dict()
for keyval in re.split(",\s+", values):
ke... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_location(location):\n city, state = location.strip().split(',')\n return f\"{city.strip().replace(' ', '-')}-{state.strip().replace(' ', '-')}\"",
"def from_str(cls, string):",
"def parse_location(location_str):\n def floatify(latlon):\n \"\"\" Turns a latlon string into a flo... | [
"0.5796067",
"0.57846785",
"0.5758145",
"0.5640593",
"0.5627394",
"0.5549736",
"0.53233117",
"0.5281842",
"0.526684",
"0.52663153",
"0.5237364",
"0.51908535",
"0.51495135",
"0.5133941",
"0.511369",
"0.5065242",
"0.5057992",
"0.50402415",
"0.5021159",
"0.5001185",
"0.49983492"... | 0.6709516 | 0 |
Creates a JSON request string for location information from Google. The access points are a map from MAC addresses to access point information dicts. >>> wlp = WifiLocationProvider() >>> wlp.request_dict()["wifi_towers"] | def request_dict(self):
ap_map = self.get_access_points()
if not ap_map:
return None
request = dict()
request["version"] = "1.1.0"
request["host"] = "localhost"
request["request_address"] = True
request["address_language"] = "en_GB"
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def google(self):\r\n prefix ='https://maps.googleapis.com/maps/api/staticmap?center='\r\n middle = '&zoom=14&size=400x400&markers='\r\n suffix = '&key=AIzaSyD5nqmDGFH1SUZxJAYVtFHP7RNjjFE9CHg'\r\n marker = '+'.join(self.placeToSearch) # marker in google format, no space but + separator\... | [
"0.54804015",
"0.53161436",
"0.5291573",
"0.5062097",
"0.50081235",
"0.49757034",
"0.49650696",
"0.49560982",
"0.49435157",
"0.49330306",
"0.49222854",
"0.48264238",
"0.4785291",
"0.4694317",
"0.46646068",
"0.4656133",
"0.46497852",
"0.4645924",
"0.46358958",
"0.46351212",
"0... | 0.70129 | 0 |
Extract features from points. | def extract_feat(self, points, img_metas=None):
voxels, num_points, coors = self.voxelize(points)
voxel_features = self.voxel_encoder(voxels, num_points, coors)
batch_size = coors[-1, 0].item() + 1
x = self.middle_encoder(voxel_features, coors, batch_size)
x = self.backbone(x)
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_feat(self, points, img, img_metas):\n img_feats = self.extract_img_feat(img, img_metas)\n pts_feats = self.extract_pts_feat(points, img_feats, img_metas)\n return (img_feats, pts_feats)",
"def extract_features(input_feature_map, points=conv43Points):\n arr = []\n for y,x in... | [
"0.76258445",
"0.75479424",
"0.7520793",
"0.7304446",
"0.72170246",
"0.7079929",
"0.68441653",
"0.6794022",
"0.6782572",
"0.6651835",
"0.6640643",
"0.66359365",
"0.65395874",
"0.6499239",
"0.6448742",
"0.6447992",
"0.642975",
"0.6423805",
"0.63974905",
"0.6388154",
"0.6350114... | 0.7565338 | 1 |
Apply hard voxelization to points. | def voxelize(self, points):
voxels, coors, num_points = [], [], []
for res in points:
res_voxels, res_coors, res_num_points = self.voxel_layer(res)
voxels.append(res_voxels)
coors.append(res_coors)
num_points.append(res_num_points)
voxels = torch.c... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def voxelize(self, points):\n voxels, coors, num_points = [], [], []\n for res in points:\n res_voxels, res_coors, res_num_points = self.pts_voxel_layer(res)\n voxels.append(res_voxels)\n coors.append(res_coors)\n num_points.append(res_num_points)\n ... | [
"0.6439414",
"0.6234096",
"0.61848783",
"0.61010504",
"0.6074771",
"0.58545244",
"0.5738699",
"0.5695123",
"0.56734055",
"0.5653354",
"0.5604796",
"0.56036615",
"0.55895144",
"0.557377",
"0.55472815",
"0.5544818",
"0.55416125",
"0.54999644",
"0.54610586",
"0.54565",
"0.543493... | 0.6423426 | 1 |
Load the grid data from the sample earth_relief file. | def fixture_grid():
return load_earth_relief(registration="pixel") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fixture_grid():\n return load_earth_relief(registration=\"gridline\")",
"def load_train_grid40(return_imsize=True):\n file_base_path = os.path.join(rlvision.RLVISION_DATA,\n \"train\", \"gridworld_40\", \"gridworld_40\")\n\n if not os.path.isfile(file_base_path+\".h5... | [
"0.71628165",
"0.65157515",
"0.64941037",
"0.6290516",
"0.6283898",
"0.6261977",
"0.6137829",
"0.59520215",
"0.5852507",
"0.58480215",
"0.5800172",
"0.5742071",
"0.57125926",
"0.57125765",
"0.5712251",
"0.5711601",
"0.5700506",
"0.5699708",
"0.5662421",
"0.5648323",
"0.561772... | 0.7127945 | 1 |
grdfilter an input DataArray, and output as DataArray. | def test_grdfilter_dataarray_in_dataarray_out(grid):
result = grdfilter(grid=grid, filter="g600", distance="4")
# check information of the output grid
assert isinstance(result, xr.DataArray)
assert result.coords["lat"].data.min() == -89.5
assert result.coords["lat"].data.max() == 89.5
assert res... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_grdfilter_dataarray_in_file_out(grid):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(grid, outgrid=tmpfile.name, filter=\"g600\", distance=\"4\")\n assert result is None # grdfilter returns None if output to a file\n result = grdinfo(tmpfile.name, per_column=... | [
"0.72438073",
"0.71895283",
"0.6108349",
"0.6072146",
"0.60382855",
"0.6018336",
"0.58795875",
"0.58754164",
"0.5870362",
"0.5869875",
"0.5862751",
"0.5731808",
"0.56614274",
"0.5651315",
"0.56284916",
"0.5540049",
"0.55231196",
"0.5522951",
"0.5522951",
"0.54774076",
"0.5467... | 0.75263023 | 0 |
grdfilter an input DataArray, and output to a grid file. | def test_grdfilter_dataarray_in_file_out(grid):
with GMTTempFile(suffix=".nc") as tmpfile:
result = grdfilter(grid, outgrid=tmpfile.name, filter="g600", distance="4")
assert result is None # grdfilter returns None if output to a file
result = grdinfo(tmpfile.name, per_column=True)
a... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_grdfilter_file_in_dataarray_out():\n outgrid = grdfilter(\n \"@earth_relief_01d\", region=\"0/180/0/90\", filter=\"g600\", distance=\"4\"\n )\n assert isinstance(outgrid, xr.DataArray)\n assert outgrid.gmt.registration == 1 # Pixel registration\n assert outgrid.gmt.gtype == 1 # Geo... | [
"0.74676794",
"0.6965203",
"0.6943156",
"0.57479334",
"0.5405306",
"0.5393092",
"0.53729516",
"0.5346795",
"0.5268873",
"0.5247941",
"0.5225041",
"0.52224445",
"0.52160496",
"0.5199661",
"0.51719517",
"0.5155658",
"0.5155277",
"0.5070777",
"0.50582415",
"0.50448567",
"0.50338... | 0.77258784 | 0 |
grdfilter an input grid file, and output as DataArray. | def test_grdfilter_file_in_dataarray_out():
outgrid = grdfilter(
"@earth_relief_01d", region="0/180/0/90", filter="g600", distance="4"
)
assert isinstance(outgrid, xr.DataArray)
assert outgrid.gmt.registration == 1 # Pixel registration
assert outgrid.gmt.gtype == 1 # Geographic type
# ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_grdfilter_dataarray_in_file_out(grid):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(grid, outgrid=tmpfile.name, filter=\"g600\", distance=\"4\")\n assert result is None # grdfilter returns None if output to a file\n result = grdinfo(tmpfile.name, per_column=... | [
"0.82479215",
"0.73520637",
"0.7201283",
"0.63456845",
"0.63165814",
"0.585526",
"0.576018",
"0.57303125",
"0.56888694",
"0.5629014",
"0.5605087",
"0.55669653",
"0.5533368",
"0.5529867",
"0.55254185",
"0.5519094",
"0.54980975",
"0.5455956",
"0.5448404",
"0.54415244",
"0.54415... | 0.76401544 | 1 |
grdfilter an input grid file, and output to a grid file. | def test_grdfilter_file_in_file_out():
with GMTTempFile(suffix=".nc") as tmpfile:
result = grdfilter(
"@earth_relief_01d",
outgrid=tmpfile.name,
region=[0, 180, 0, 90],
filter="g600",
distance="4",
)
assert result is None # return ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_grdfilter_dataarray_in_file_out(grid):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(grid, outgrid=tmpfile.name, filter=\"g600\", distance=\"4\")\n assert result is None # grdfilter returns None if output to a file\n result = grdinfo(tmpfile.name, per_column=... | [
"0.704675",
"0.68028927",
"0.5841552",
"0.58407056",
"0.57574695",
"0.5738572",
"0.5637733",
"0.5556143",
"0.55201113",
"0.54455745",
"0.54209566",
"0.5312087",
"0.529742",
"0.52541614",
"0.5249761",
"0.52250904",
"0.5192693",
"0.5181852",
"0.5169016",
"0.51613814",
"0.516137... | 0.7821418 | 0 |
Check that grdfilter fails correctly. | def test_grdfilter_fails():
with pytest.raises(GMTInvalidInput):
grdfilter(np.arange(10).reshape((5, 2))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_filter_errors(self):\n\n with self.assertRaises(ValueError):\n self.test_table.filter()\n\n with self.assertRaises(ValueError):\n self.test_table.filter(mode='wrongmode', Property='Property')",
"def test_filter_function_settings_fail(self):\n with self.assertRa... | [
"0.67835367",
"0.6310252",
"0.62462205",
"0.61204123",
"0.6087536",
"0.60227674",
"0.59823984",
"0.59056413",
"0.58599424",
"0.57920605",
"0.573395",
"0.5714643",
"0.5699136",
"0.5680347",
"0.5672416",
"0.56558776",
"0.56553173",
"0.56384057",
"0.563598",
"0.55971026",
"0.558... | 0.71679807 | 0 |
Generate Pydantic Model files given the Postman Collection input file. | def generate_models(input_file):
if not os.path.exists(input_file):
console.print(
f":pile_of_poo: [bold red]No file found at the given path:[/bold red] [i yellow]{input_file}[/i yellow]"
)
exit(1)
# TODO: Add try/catch for other possible errors
collection = postman.load... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def input_models():\n return [\n PDBFile(\n Path(golden_data, \"protdna_complex_1.pdb\"),\n path=golden_data,\n score=42.0,\n restr_fname=Path(golden_data, \"example_ambig_1.tbl\")\n ),\n PDBFile(\n Path(golden_data, \"protdna_compl... | [
"0.56165767",
"0.5588253",
"0.5478929",
"0.54493415",
"0.5426958",
"0.53957415",
"0.53064173",
"0.52855295",
"0.52591866",
"0.52293295",
"0.5166549",
"0.50401545",
"0.5026971",
"0.50226295",
"0.5017243",
"0.4993968",
"0.4981129",
"0.49646968",
"0.4943805",
"0.4927351",
"0.491... | 0.7841486 | 0 |
Tests that update_status creates a correctly formatted url. Compares the url created by update_status to correct_url | def test_update_status(self):
content_url = 'https://api.github.com'
status = 'success'
token = '123'
correct_url = 'https://123:x-oauth-basic@api.github.com/'
post_req = update_status(content_url, status, token)
self.assertEqual(correct_url, post_req.url)
"""
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _checkServiceURL(self, serviceName, options):\n url = self._getURL(serviceName, options)\n system = options['System']\n module = options['Module']\n self.log.info(\"Checking URLs for %s/%s\" % (system, module))\n urlsConfigPath = os.path.join('/Systems', system, self.setup, 'URLs', module)\n ... | [
"0.6494839",
"0.63909346",
"0.62975353",
"0.61703724",
"0.61658883",
"0.61041194",
"0.6101163",
"0.6069293",
"0.59918106",
"0.5973078",
"0.5959037",
"0.59548736",
"0.59496254",
"0.5906024",
"0.5898634",
"0.58770066",
"0.58564144",
"0.58474356",
"0.58468753",
"0.5846074",
"0.5... | 0.7175717 | 0 |
Helper method for making a request to the Blockstore REST API | def api_request(method, url, **kwargs):
if not settings.BLOCKSTORE_API_AUTH_TOKEN:
raise ImproperlyConfigured("Cannot use Blockstore unless BLOCKSTORE_API_AUTH_TOKEN is set.")
kwargs.setdefault('headers', {})['Authorization'] = f"Token {settings.BLOCKSTORE_API_AUTH_TOKEN}"
response = requests.reques... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getBlocks(request):\n if request.method == 'GET':\n blockName = request.GET.get('block', '')\n bid = request.GET.get('bid', '')\n districtName=request.GET.get('district', '')\n stateName=request.GET.get('state', '')\n limit=request.GET.get('limit', '')\n if limit ==... | [
"0.63156694",
"0.63156694",
"0.58579916",
"0.5850182",
"0.58061534",
"0.5766906",
"0.57296586",
"0.57222426",
"0.5660935",
"0.56325656",
"0.55943286",
"0.5584792",
"0.5576492",
"0.5573743",
"0.5564736",
"0.55544657",
"0.5547002",
"0.5537741",
"0.55332834",
"0.55300415",
"0.55... | 0.7547545 | 0 |
Given data about a Collection returned by any blockstore REST API, convert it to a Collection instance. | def _collection_from_response(data):
return Collection(uuid=UUID(data['uuid']), title=data['title']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def return_collection(self, collection, request, environ, start_response,\n response_headers):\n response_type = self.content_negotiation(\n request, environ, self.ValueTypes)\n if response_type is None:\n return self.odata_error(\n reques... | [
"0.6577779",
"0.6343278",
"0.6237831",
"0.6221495",
"0.6207642",
"0.6203012",
"0.6195551",
"0.6181388",
"0.6098431",
"0.6020433",
"0.60099417",
"0.5990563",
"0.5976519",
"0.5969405",
"0.5889118",
"0.58554935",
"0.58416325",
"0.5817092",
"0.5749313",
"0.57408684",
"0.57094455"... | 0.71289414 | 0 |
Given data about a Bundle returned by any blockstore REST API, convert it to a Bundle instance. | def _bundle_from_response(data):
return Bundle(
uuid=UUID(data['uuid']),
title=data['title'],
description=data['description'],
slug=data['slug'],
# drafts: Convert from a dict of URLs to a dict of UUIDs:
drafts={draft_name: UUID(url.split('/')[-1]) for (draft_name, ur... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bundle_instance(obj):\n\n content, contents = osl_encode(obj, True)\n # should be a bunch of documents, not just one.\n bundle = [json.dumps(c) for c in contents]\n return bundle",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions]... | [
"0.6342084",
"0.6269155",
"0.62631524",
"0.6151707",
"0.60612935",
"0.5925814",
"0.59022117",
"0.5898795",
"0.5896431",
"0.58471715",
"0.5792275",
"0.5681991",
"0.56783265",
"0.56710964",
"0.566184",
"0.5634402",
"0.56337357",
"0.56250954",
"0.5600761",
"0.5547723",
"0.554208... | 0.76745385 | 0 |
Given data about a Draft returned by any blockstore REST API, convert it to a Draft instance. | def _draft_from_response(data):
return Draft(
uuid=UUID(data['uuid']),
bundle_uuid=UUID(data['bundle_uuid']),
name=data['name'],
updated_at=dateutil.parser.parse(data['staged_draft']['updated_at']),
files={
path: DraftFile(path=path, **file)
for path, ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_draft(self):\n return Draft(self)",
"def convert_to_draft(self, source_location):\r\n if source_location.category in DIRECT_ONLY_CATEGORIES:\r\n raise InvalidVersionError(source_location)\r\n original = self.collection.find_one({'_id': source_location.to_deprecated_son(... | [
"0.6512679",
"0.6157547",
"0.58887076",
"0.5854836",
"0.57501346",
"0.5479013",
"0.54623795",
"0.5257162",
"0.5218776",
"0.51486325",
"0.51365983",
"0.5121651",
"0.50962335",
"0.50772154",
"0.5046453",
"0.4987209",
"0.49718696",
"0.49420643",
"0.48823994",
"0.4866242",
"0.482... | 0.75933146 | 0 |
Create a new bundle. Note that description is currently required. | def create_bundle(collection_uuid, slug, title="New Bundle", description=""):
result = api_request('post', api_url('bundles'), json={
"collection_uuid": str(collection_uuid),
"slug": slug,
"title": title,
"description": description,
})
return _bundle_from_response(result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_bundle(self):\n self._highest_bundle_id += 1\n bundle = Bundle(document=self, bundle_id=str(self._highest_bundle_id))\n self.bundles.append(bundle)\n bundle.number = len(self.bundles)\n return bundle",
"def firmware_pack_create(handle, org_name, name, rack_bundle_ver... | [
"0.668372",
"0.62939143",
"0.6256046",
"0.61546004",
"0.61484855",
"0.6085676",
"0.6012268",
"0.5936996",
"0.5896103",
"0.5847529",
"0.58094597",
"0.5789102",
"0.57620066",
"0.57466364",
"0.5724561",
"0.5723571",
"0.5719755",
"0.5677166",
"0.5674543",
"0.5627814",
"0.5617824"... | 0.71486557 | 0 |
Update a bundle's title, description, slug, or collection. | def update_bundle(bundle_uuid, **fields):
assert isinstance(bundle_uuid, UUID)
data = {}
# Most validation will be done by Blockstore, so we don't worry too much about data validation
for str_field in ("title", "description", "slug"):
if str_field in fields:
data[str_field] = fields.... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_in_place(self, request, original_bundle, new_data):\r\n\r\n # TODO: Is this the place to use MongoDB atomic operations to update the document?\r\n\r\n from tastypie.utils import dict_strip_unicode_keys\r\n original_bundle.data.update(**dict_strip_unicode_keys(new_data))\r\n\r\n ... | [
"0.64282304",
"0.5881592",
"0.58728546",
"0.5861083",
"0.5718554",
"0.56827384",
"0.5633981",
"0.5599242",
"0.55933994",
"0.55191696",
"0.54296964",
"0.5405867",
"0.54009414",
"0.5397951",
"0.5393401",
"0.5391082",
"0.5379236",
"0.53778917",
"0.5375577",
"0.5358188",
"0.53553... | 0.7425863 | 0 |
Delete the specified draft, removing any staged changes/files/deletes. Does not return any value. | def delete_draft(draft_uuid):
api_request('delete', api_url('drafts', str(draft_uuid))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self,\n draft_id,\n ):\n return self._invoke('delete',\n {\n 'draft_id': draft_id,\n })",
"def _delete_draft_message(draft):\n if draft is not None:\n draft.key.delete()\n return Ht... | [
"0.8477247",
"0.7536803",
"0.70039415",
"0.6800242",
"0.6678656",
"0.6091169",
"0.59882015",
"0.598721",
"0.5658908",
"0.5609526",
"0.55231327",
"0.55021924",
"0.5446673",
"0.54136",
"0.5397559",
"0.53314614",
"0.53098047",
"0.5283851",
"0.52465004",
"0.5162799",
"0.5152421",... | 0.78767365 | 1 |
Get the details of the specified bundle version | def get_bundle_version(bundle_uuid, version_number):
if version_number == 0:
return None
version_url = api_url('bundle_versions', str(bundle_uuid) + ',' + str(version_number))
return api_request('get', version_url) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_version():\n return about.get_version()",
"def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()",
"def _get_via_app_bundle(self, path: pathlib.Path | str) -> str:\n\n path = pathlib.Path(path) / \"Contents\" / \"Info.plist\"\n\n if not... | [
"0.71063685",
"0.7055568",
"0.69581395",
"0.6899469",
"0.68665576",
"0.68434507",
"0.6821725",
"0.6754685",
"0.6730333",
"0.6705119",
"0.67003167",
"0.6696833",
"0.6665585",
"0.66480154",
"0.6647472",
"0.663893",
"0.6636554",
"0.66245484",
"0.6607001",
"0.658064",
"0.6521611"... | 0.7637766 | 0 |
Get a list of the files in the specified bundle version | def get_bundle_version_files(bundle_uuid, version_number):
if version_number == 0:
return []
version_info = get_bundle_version(bundle_uuid, version_number)
return [BundleFile(path=path, **file_metadata) for path, file_metadata in version_info["snapshot"]["files"].items()] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_version_files(self, package, version):\n with self._conn.begin():\n return {\n row.filename\n for row in self._conn.execute(\n \"SELECT filename \"\n \"FROM get_version_files(%s, %s)\", (package, version)\n ... | [
"0.7293217",
"0.69692844",
"0.6767985",
"0.65724486",
"0.65694773",
"0.6546248",
"0.6306112",
"0.6290885",
"0.6254722",
"0.6251354",
"0.6199755",
"0.6197552",
"0.61879873",
"0.61280656",
"0.61088586",
"0.60504794",
"0.60438734",
"0.59778523",
"0.5970127",
"0.5940632",
"0.5922... | 0.7863409 | 0 |
Get a dictionary of the links in the specified bundle version | def get_bundle_version_links(bundle_uuid, version_number):
if version_number == 0:
return {}
version_info = get_bundle_version(bundle_uuid, version_number)
return {
name: LinkDetails(
name=name,
direct=LinkReference(**link["direct"]),
indirect=[LinkReferen... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_bundle_links(bundle_uuid, use_draft=None):\n bundle = get_bundle(bundle_uuid)\n if use_draft and use_draft in bundle.drafts: # pylint: disable=unsupported-membership-test\n draft_uuid = bundle.drafts[use_draft] # pylint: disable=unsubscriptable-object\n return get_draft(draft_uuid).li... | [
"0.65551144",
"0.63308424",
"0.6044713",
"0.5982717",
"0.5820173",
"0.58162254",
"0.5697063",
"0.5680008",
"0.5680008",
"0.5654119",
"0.5614463",
"0.5573144",
"0.55640477",
"0.55347484",
"0.5501417",
"0.5493479",
"0.5456731",
"0.5444286",
"0.54436064",
"0.5442817",
"0.5428819... | 0.7957789 | 0 |
Get a dict of all the files in the specified bundle. Returns a dict where the keys are the paths (strings) and the values are BundleFile or DraftFile tuples. | def get_bundle_files_dict(bundle_uuid, use_draft=None):
bundle = get_bundle(bundle_uuid)
if use_draft and use_draft in bundle.drafts: # pylint: disable=unsupported-membership-test
draft_uuid = bundle.drafts[use_draft] # pylint: disable=unsubscriptable-object
return get_draft(draft_uuid).files
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_bundle_files(bundle_uuid, use_draft=None):\n return get_bundle_files_dict(bundle_uuid, use_draft).values() # lint-amnesty, pylint: disable=dict-values-not-iterating",
"def get_bundle_version_files(bundle_uuid, version_number):\n if version_number == 0:\n return []\n version_info = get_bu... | [
"0.7790072",
"0.64431036",
"0.63394016",
"0.63182753",
"0.630003",
"0.62759304",
"0.6194732",
"0.6178175",
"0.61479455",
"0.6144583",
"0.6076846",
"0.60737944",
"0.6040074",
"0.5976576",
"0.59606755",
"0.5862239",
"0.5857732",
"0.58249676",
"0.5790602",
"0.5765096",
"0.576296... | 0.8014129 | 0 |
Get an iterator over all the files in the specified bundle or draft. | def get_bundle_files(bundle_uuid, use_draft=None):
return get_bundle_files_dict(bundle_uuid, use_draft).values() # lint-amnesty, pylint: disable=dict-values-not-iterating | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_files(self, block):\n \n raise NotImplementedError('get_files')",
"def __iter__(self):\n\n return iter(self.files)",
"def getFiles(self, getContent=False):\n for index, file in enumerate(self.files):\n if getContent:\n logger.debug(\n ... | [
"0.61938465",
"0.6149882",
"0.61284655",
"0.60207605",
"0.5920121",
"0.5907837",
"0.5906147",
"0.58372724",
"0.5820502",
"0.5797337",
"0.575836",
"0.5687807",
"0.56840444",
"0.56556374",
"0.56395537",
"0.5559454",
"0.5515556",
"0.54998755",
"0.54887193",
"0.54826784",
"0.5477... | 0.6996779 | 0 |
Get a dict of all the links in the specified bundle. Returns a dict where the keys are the link names (strings) and the values are LinkDetails or DraftLinkDetails tuples. | def get_bundle_links(bundle_uuid, use_draft=None):
bundle = get_bundle(bundle_uuid)
if use_draft and use_draft in bundle.drafts: # pylint: disable=unsupported-membership-test
draft_uuid = bundle.drafts[use_draft] # pylint: disable=unsubscriptable-object
return get_draft(draft_uuid).links
e... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_bundle_version_links(bundle_uuid, version_number):\n if version_number == 0:\n return {}\n version_info = get_bundle_version(bundle_uuid, version_number)\n return {\n name: LinkDetails(\n name=name,\n direct=LinkReference(**link[\"direct\"]),\n indire... | [
"0.7305899",
"0.68209445",
"0.6084511",
"0.5888304",
"0.58443224",
"0.58205533",
"0.57978463",
"0.57962805",
"0.57638013",
"0.56552774",
"0.56552774",
"0.56552774",
"0.56552774",
"0.56552774",
"0.5595823",
"0.55944407",
"0.5573283",
"0.5561761",
"0.5560517",
"0.55239433",
"0.... | 0.73346376 | 0 |
Create or overwrite the file at 'path' in the specified draft with the given contents. To delete a file, pass contents=None. If you don't know the draft's UUID, look it up using get_or_create_bundle_draft(). Does not return anything. | def write_draft_file(draft_uuid, path, contents):
api_request('patch', api_url('drafts', str(draft_uuid)), json={
'files': {
path: encode_str_for_draft(contents) if contents is not None else None,
},
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_file(path: Path, content: str) -> None:\n path.touch()\n with path.open(\"w\") as f:\n f.write(content)",
"def put_contents( path, name, contents, dryrun = False, get_config=lambda: {}, verbose=False ):\n t_file_fh, t_file_name = tempfile.mkstemp()\n os.close(t_file_fh)\n print(c... | [
"0.59362435",
"0.5571267",
"0.5565971",
"0.55636615",
"0.55125356",
"0.5481879",
"0.5437837",
"0.5421367",
"0.5293984",
"0.5231956",
"0.5211434",
"0.51726305",
"0.5162061",
"0.5162061",
"0.5150922",
"0.51438993",
"0.51366174",
"0.5113197",
"0.5112223",
"0.51041156",
"0.509784... | 0.80568475 | 0 |
Create or replace the link with the given name in the specified draft so that it points to the specified bundle version. To delete a link, pass bundle_uuid=None, version=None. If you don't know the draft's UUID, look it up using get_or_create_bundle_draft(). Does not return anything. | def set_draft_link(draft_uuid, link_name, bundle_uuid, version):
api_request('patch', api_url('drafts', str(draft_uuid)), json={
'links': {
link_name: {"bundle_uuid": str(bundle_uuid), "version": version} if bundle_uuid is not None else None,
},
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_or_create_bundle_draft(bundle_uuid, draft_name):\n bundle = get_bundle(bundle_uuid)\n try:\n return get_draft(bundle.drafts[draft_name]) # pylint: disable=unsubscriptable-object\n except KeyError:\n # The draft doesn't exist yet, so create it:\n response = api_request('post',... | [
"0.58408153",
"0.51587373",
"0.49001387",
"0.4804741",
"0.47611052",
"0.47401834",
"0.46968883",
"0.46876737",
"0.45107222",
"0.4470004",
"0.44540083",
"0.4445877",
"0.44092384",
"0.44081253",
"0.44045332",
"0.44012755",
"0.43953812",
"0.4391029",
"0.43773264",
"0.43721762",
... | 0.8172148 | 0 |
Ensure that the given Blockstore URL is accessible from the end user's browser. | def force_browser_url(blockstore_file_url):
# Hack: on some devstacks, we must necessarily use different URLs for
# accessing Blockstore file data from within and outside of docker
# containers, but Blockstore has no way of knowing which case any particular
# request is for. So it always returns a URL s... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_url(self):\n pass",
"def check_url_invalidity(self) -> bool:\n validate = URLValidator()\n try:\n validate(self.args.url)\n return False\n except ValidationError:\n return True",
"def check_url(url=None, parse_url=None):\n return ... | [
"0.6467403",
"0.61517715",
"0.61352235",
"0.6027143",
"0.6014926",
"0.6010201",
"0.6007184",
"0.599208",
"0.59750557",
"0.5965351",
"0.5957245",
"0.5952767",
"0.5918729",
"0.5906625",
"0.589159",
"0.58743834",
"0.58737737",
"0.58627915",
"0.5818365",
"0.581255",
"0.5793624",
... | 0.66461307 | 0 |
Computes the forward pass for the tanh activation function. | def tanh_forward(self, x):
#############################################################################
# TODO: Implement the tanh forward pass. #
#############################################################################
out = np.tanh(x)
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward_hidden_activation(self, X):\n return np.tanh(X)",
"def test_tanh_activation(self):\n self.assertEqual([0.099668, 0.099668], list(\n af.TanH().output(np.array([0.1, 0.1]))))\n self.assertEqual([0.990066, 0.990066], list(\n af.TanH().derivative(np.array([0.1, ... | [
"0.7288412",
"0.7224226",
"0.71908355",
"0.7186722",
"0.7179838",
"0.71629184",
"0.71265453",
"0.7122386",
"0.7074787",
"0.7027048",
"0.69930476",
"0.69930476",
"0.6986668",
"0.6968281",
"0.6967701",
"0.6929699",
"0.6904969",
"0.68929416",
"0.68862855",
"0.6878756",
"0.687386... | 0.8011069 | 0 |
Computes the forward pass of a rectified linear unit (ReLU). | def relu_forward(self, x):
#out = None
#############################################################################
# TODO: Implement the ReLU forward pass. #
#############################################################################
out = n... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def relu_forward(x):\n ############################################################################\n # TODO: Implement the ReLU forward pass. #\n ############################################################################\n ######################################################... | [
"0.6923425",
"0.6670239",
"0.6663412",
"0.6650045",
"0.6630312",
"0.65392053",
"0.64895386",
"0.648836",
"0.64600945",
"0.6429588",
"0.6399773",
"0.63981783",
"0.63528794",
"0.63357466",
"0.63077855",
"0.62966233",
"0.6285175",
"0.6278084",
"0.62459034",
"0.62427825",
"0.6203... | 0.67316556 | 1 |
Computes the backward pass for a layer of rectified linear units (ReLUs). | def relu_backward(self, dUpper, cache):
x = cache
#############################################################################
# TODO: Implement the ReLU backward pass. #
#############################################################################
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backward(ctx, grad_output):\n loss, reg, u, lbda = ctx.saved_tensors\n\n device = u.device\n\n # do clever computations\n eps = 1e-10\n grad, = torch.autograd.grad(loss, u, only_inputs=True,\n retain_graph=True)\n x = (u - eps * grad)... | [
"0.6948249",
"0.6898323",
"0.6847992",
"0.6847992",
"0.68477255",
"0.68407637",
"0.68200904",
"0.6768128",
"0.67572653",
"0.67565167",
"0.67562467",
"0.6743265",
"0.67394495",
"0.6715497",
"0.67090523",
"0.67006433",
"0.669643",
"0.669643",
"0.66886026",
"0.6666757",
"0.66667... | 0.70406467 | 0 |
Returns the count of sequences in the given FASTA file(s). input_fasta_files is a list of FASTA filepaths | def get_sequence_count(input_fasta_files):
# Correction for the case that only one file passed
if type(input_fasta_files)==str:
input_fasta_files=[input_fasta_files]
count=0
for n in input_fasta_files:
fasta_f=open(n,'U')
for label,seq in MinimalFastaParser(fasta_f)... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count_seqs_in_filepaths(fasta_filepaths, seq_counter=count_seqs):\r\n total = 0\r\n counts = []\r\n inaccessible_filepaths = []\r\n # iterate over the input files\r\n for fasta_filepath in fasta_filepaths:\r\n # if the file is actually fastq, use the fastq parser.\r\n # otherwise u... | [
"0.7568509",
"0.7414015",
"0.67043936",
"0.6663063",
"0.6578762",
"0.61955506",
"0.61545354",
"0.59026015",
"0.5882633",
"0.586203",
"0.5835468",
"0.58194464",
"0.57913864",
"0.5739672",
"0.5698394",
"0.569644",
"0.5685289",
"0.56555086",
"0.56553745",
"0.56428534",
"0.564140... | 0.8925589 | 0 |
Builds a list of primer objects from initial_primers | def construct_primers(initial_primers):
primers=[]
for n in initial_primers:
primers.append(ProspectivePrimer(n[0],n[1],initial_primers[n]))
return primers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_primers(header,\r\n mapping_data):\r\n\r\n if \"LinkerPrimerSequence\" in header:\r\n primer_ix = header.index(\"LinkerPrimerSequence\")\r\n else:\r\n raise IndexError(\r\n (\"Mapping file is missing LinkerPrimerSequence field.\"))\r\n if \"ReversePrimer\" i... | [
"0.61152774",
"0.5685748",
"0.54845613",
"0.54509944",
"0.5440025",
"0.53709817",
"0.53416723",
"0.53409606",
"0.53197396",
"0.5274368",
"0.5267137",
"0.5261285",
"0.52573895",
"0.5245288",
"0.5206712",
"0.5176481",
"0.51707155",
"0.5140353",
"0.5139623",
"0.5128205",
"0.5076... | 0.8783308 | 0 |
Convert DNA codes to numeric values for bitwise comparisons. Returns a numeric list corresponding to the nucleotide sequence | def convert_to_numeric(sequence):
int_mapped_seq=[]
DNA_to_numeric = get_DNA_to_numeric()
for n in sequence:
int_mapped_seq.append(DNA_to_numeric[n])
return int_mapped_seq | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def str2NumList(strn):\n\treturn [ord(chars) for chars in strn]",
"def nucleotide_numbering():\n nucleotide_to_number = {'A': 0, 'C': 1, 'G': 2, 'T': 3}\n number_to_nucleotide = {0: 'A', 1: 'C', 2: 'G', 3: 'T'}\n return nucleotide_to_number, number_to_nucleotide",
"def encode_rna(x):\n return [0 if... | [
"0.6309183",
"0.6230118",
"0.6208863",
"0.6096392",
"0.5975599",
"0.59637845",
"0.5953173",
"0.59482646",
"0.580563",
"0.5801203",
"0.5766781",
"0.5749419",
"0.5746811",
"0.57197213",
"0.5715822",
"0.5704024",
"0.570132",
"0.5697264",
"0.5672337",
"0.56659025",
"0.56524974",
... | 0.69297695 | 0 |
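`convert_to_numeric` relies on a `get_DNA_to_numeric()` mapping that is not shown. The usual reason for a numeric encoding "for bitwise comparisons" is one bit per base, so IUPAC degenerate codes become ORs and a match test is a single AND; the concrete values below are therefore an assumption:

```python
# One bit per base; degenerate IUPAC codes are ORs of their member bases.
DNA_TO_NUMERIC = {"A": 1, "C": 2, "G": 4, "T": 8,
                  "R": 1 | 4,          # puRine: A or G
                  "Y": 2 | 8,          # pYrimidine: C or T
                  "N": 1 | 2 | 4 | 8}  # any base

def convert_to_numeric(sequence):
    return [DNA_TO_NUMERIC[base] for base in sequence]

# 'R' matches 'A' because their bit patterns overlap:
assert DNA_TO_NUMERIC["R"] & DNA_TO_NUMERIC["A"]
assert not DNA_TO_NUMERIC["R"] & DNA_TO_NUMERIC["C"]
```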
returns a corrected unaligned index based on aligned index | def get_corrected_index(seq,
aligned_index):
# Counts the number of nucleotides in aligned sequence, returns
# count of nucleotides occuring before aligned index reached
slice_seq=seq[0:aligned_index]
# If different gap characters used, may need to modify this
# In curre... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_align_idx(self):\n self.amp4.rotateAng([5, 5, 5], ang='deg')\n al = align(self.amp3, self.amp4, mv=[0, 1, 2, 3], sv=[0, 1, 2, 3], method='idxPoints')\n all(self.assertAlmostEqual(al.m.vert[i, 0], al.s.vert[i, 0], delta=0.1) for i in range(al.s.vert.shape[0]))",
"def idx2off(i):\n ... | [
"0.63992536",
"0.58021104",
"0.56746924",
"0.5617751",
"0.56103647",
"0.5595864",
"0.55761355",
"0.54659295",
"0.54612035",
"0.5456233",
"0.5443871",
"0.5337581",
"0.53357214",
"0.53213453",
"0.529732",
"0.5248855",
"0.5244388",
"0.5244107",
"0.52407306",
"0.5207596",
"0.5194... | 0.6640512 | 0 |
Appends upstream and downstream sequence information for a primer hit. Because some sequences may be hit near the 5' or 3' end of a sequence read, it is necessary to append N's to the upstream or downstream region. This makes both visual inspection of the primers easier and allows for alignment objects to be loaded given a ... | def append_primer_hit(primer,
label,
hit_index,
region_slice,
overall_length,
unaligned_seq,
primer_len):
primer.match_count+=1
primer.labels.append(label.split()[0])
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def primer_start_fix(self):\r\n #TODO this function will not be used anymore, remove?\r\n if self.type in [\"forward_primer\", \"reverse_primer\", \"PCR_product\"]:\r\n self.start += 1\r\n if self.type == \"region\" and self.source == \"Primer3\":\r\n # this is the region... | [
"0.61210686",
"0.6015792",
"0.5825172",
"0.5388359",
"0.5349065",
"0.5332983",
"0.5280968",
"0.5251739",
"0.52036214",
"0.5173795",
"0.5172554",
"0.516637",
"0.5143279",
"0.5141967",
"0.5104009",
"0.5093995",
"0.5085161",
"0.50663805",
"0.50557864",
"0.50397253",
"0.49996945"... | 0.6823022 | 0 |
Searches through an integer-mapped sequence to find specific matches. This function does not append data from sequences; rather, its purpose is to eliminate nonspecific primers before the sensitive primers (along with the associated sequence data) are built. | def find_specific_primer_matches(primers,
integer_mapped_seq,
deletion_threshold,
seq_count,
sequence_length,
label,
unali... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_sensitive_primer_matches(primers,\n integer_mapped_seq,\n deletion_threshold,\n seq_count,\n sequence_length,\n label,\n ... | [
"0.70073456",
"0.6101962",
"0.5778454",
"0.57115793",
"0.5597058",
"0.55890054",
"0.55768037",
"0.54905343",
"0.54866445",
"0.5374324",
"0.5360855",
"0.53143424",
"0.5302089",
"0.523852",
"0.5202517",
"0.52015847",
"0.5189258",
"0.5116563",
"0.51111543",
"0.50882447",
"0.5085... | 0.72535133 | 0 |
Iterates over the list of primer objects and calculates percent matches | def calculate_percent_match(primers,
seq_count,
exclude_seq_count=1):
# Calculate percent of sequences that are 'hit' by each primer
for n in range(len(primers)):
# Calculate percent perfect match
primers[n].percent_match=float(primers[n].m... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_p(candidate, reference):\n matches = 0\n for grama in candidate:\n if grama in reference:\n matches += 1\n return matches/len(candidate)",
"def resultat_match(self, binomes):\n for binome in binomes:\n while True:\n score_un = self.vue.ent... | [
"0.61019254",
"0.5894499",
"0.58586526",
"0.5768695",
"0.5768504",
"0.5746886",
"0.57437664",
"0.57017726",
"0.5665368",
"0.5643431",
"0.56431913",
"0.5563038",
"0.55320835",
"0.5523601",
"0.54986566",
"0.54957074",
"0.54766464",
"0.54745346",
"0.5454031",
"0.54450965",
"0.54... | 0.71711147 | 0 |
Appends standard aligned index value to ProspectivePrimer objects | def append_std_aligned_index(primers,
standard_index_seq,
region_slice):
for n in primers:
n.std_index = True
standard_unaligned_index = get_corrected_index(standard_index_seq,
n.aligned_index)
# 5' for forward primer wo... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def table_key(self, reindex_dict):\n reindexed_marks = []\n for m in self.component1.marks:\n new_m = reindex_dict.get(m)\n if new_m == None:\n if len(reindex_dict) == 0:\n new_m = 0\n else:\n new_m = max(reinde... | [
"0.590127",
"0.5743373",
"0.5539483",
"0.5368967",
"0.52574486",
"0.5224323",
"0.52219516",
"0.5219616",
"0.51754165",
"0.5169657",
"0.5157575",
"0.51386046",
"0.51257545",
"0.5122756",
"0.50792426",
"0.50733215",
"0.5063912",
"0.5045901",
"0.50283784",
"0.50128955",
"0.50089... | 0.68654364 | 0 |
CASSANDRA-9871: Test that we can replace a node that is shut down gracefully. | def replace_shutdown_node_test(self):
self._replace_node_test(gently=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_node_graceful_shutdown(self, proc_info, controller_node):\n launch_testing.asserts.assertExitCodes(proc_info, process=controller_node)",
"def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\... | [
"0.72033966",
"0.71024096",
"0.70595825",
"0.69823843",
"0.69612706",
"0.6911128",
"0.68039066",
"0.6747855",
"0.65598166",
"0.6542973",
"0.6518864",
"0.64961773",
"0.6485054",
"0.6472087",
"0.6463794",
"0.6445934",
"0.6360621",
"0.6331515",
"0.6325584",
"0.627847",
"0.626650... | 0.75695765 | 0 |
When starting a node from a clean slate with the same address as an existing down node, the node should error out even when auto_bootstrap = false (or the node is a seed) and tell the user to use replace_address. CASSANDRA-10134 | def fail_without_replace_test(self):
debug("Starting cluster with 3 nodes.")
cluster = self.cluster
cluster.populate(3)
node1, node2, node3 = cluster.nodelist()
cluster.seeds.remove(node3)
NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')
if DISABLE_VNODES:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def replace_with_reset_resume_state_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(nod... | [
"0.6077486",
"0.59152275",
"0.5730985",
"0.57131267",
"0.5675744",
"0.55063087",
"0.5453649",
"0.54527897",
"0.54008937",
"0.5391057",
"0.5317235",
"0.53120404",
"0.52710044",
"0.52428687",
"0.5236482",
"0.51801217",
"0.51627666",
"0.51423806",
"0.51393414",
"0.51322865",
"0.... | 0.6074668 | 1 |
To handle situations such as a failed disk in a JBOD, it may be desirable to replace a node without bootstrapping. In such scenarios, best-practice advice has been to wipe the node's system keyspace data, set the initial tokens via cassandra.yaml, start up without bootstrap, and then repair. Starting the node as a replaceme... | def unsafe_replace_test(self):
debug('Starting cluster with 3 nodes.')
cluster = self.cluster
cluster.populate(3)
cluster.set_batch_commitlog(enabled=True)
node1, node2, node3 = cluster.nodelist()
cluster.seeds.remove(node3)
NUM_TOKENS = os.environ.get('NUM_TOKENS... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fail_without_replace_test(self):\n debug(\"Starting cluster with 3 nodes.\")\n cluster = self.cluster\n cluster.populate(3)\n node1, node2, node3 = cluster.nodelist()\n cluster.seeds.remove(node3)\n NUM_TOKENS = os.environ.get('NUM_TOKENS', '256')\n if DISABLE_V... | [
"0.71049017",
"0.682219",
"0.65992427",
"0.6297513",
"0.62767684",
"0.6222005",
"0.61809766",
"0.60596913",
"0.6026414",
"0.5952776",
"0.5792869",
"0.5709729",
"0.56964314",
"0.55712783",
"0.55003196",
"0.54920876",
"0.5481311",
"0.54768157",
"0.5469145",
"0.5460061",
"0.5439... | 0.6841581 | 1 |
Test that replace fails when there are insufficient replicas. CASSANDRA-11848 | def replace_with_insufficient_replicas_test(self):
debug("Starting cluster with 3 nodes.")
cluster = self.cluster
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
if DISABLE_VNODES:
num_tokens = 1
else:
# a little hacky but gre... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_redis_increase_replica_count_usual_case():",
"def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)",
"def resumable_replace_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluste... | [
"0.6814609",
"0.68009305",
"0.6593998",
"0.639997",
"0.63156056",
"0.6274903",
"0.627408",
"0.6214506",
"0.6018259",
"0.58900636",
"0.5862111",
"0.5845466",
"0.5839016",
"0.5836313",
"0.5830308",
"0.5829698",
"0.57167",
"0.56765765",
"0.5672758",
"0.56702614",
"0.5639919",
... | 0.73329014 | 0 |
Test that multi-DC replace works when rf=1 in each DC | def multi_dc_replace_with_rf1_test(self):
cluster = self.cluster
cluster.populate([1, 1])
cluster.start()
node1, node2 = cluster.nodelist()
node1 = cluster.nodes['node1']
yaml_config = """
# Create the keyspace and table
keyspace: keyspace1
keyspa... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_data_source_soaps_id_replace_post(self):\n pass",
"def test_replace_groups(self):\n pass",
"def test_replace_group(self):\n pass",
"def test_replace_identity(self):\n pass",
"def _add_dc_after_mv_test(self, rf, nts):\n\n session = self.prepare(rf=rf)\n\n l... | [
"0.5868793",
"0.57626647",
"0.57394946",
"0.57088983",
"0.56332976",
"0.5594742",
"0.556429",
"0.55391914",
"0.54893154",
"0.5411253",
"0.5371333",
"0.52877027",
"0.52727324",
"0.52647424",
"0.52616",
"0.524178",
"0.5240369",
"0.52387863",
"0.5235418",
"0.519722",
"0.5170311"... | 0.6363337 | 0 |
Initialize our `Finitefield` object with a given `prime` number | def __init__(self, prime):
if prime != 0: # Check if prime is different from zero
self.prime = prime # Assign it
else:
raise ValueError # Raise an error if prime is zero | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(s, p):\n Zmod.__init__(s, p)\n if s.element_class != FiniteFieldElement:\n raise ArithmeticError(\"Invalid Prime : %d\" % p)\n s.p = p",
"def __init__(self, prime, server):\n self.N = prime\n self.g = 2\n self.k = 3\n self.server = server",
... | [
"0.69248664",
"0.64870983",
"0.5943145",
"0.5748694",
"0.57429487",
"0.5697714",
"0.5641275",
"0.5625622",
"0.5612961",
"0.56080437",
"0.5605338",
"0.5581152",
"0.555875",
"0.5549988",
"0.55280745",
"0.5524173",
"0.54919416",
"0.5483966",
"0.5442872",
"0.5397962",
"0.5372536"... | 0.74800426 | 0 |
Obtain the equivalence class of a given number. | def equivalence(self, n):
return n % self.prime | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_equivalent_class(record):\n equivalent_class = {}\n class_members=[]\n max_class_number = -1\n for pair in record:\n if (pair[0] in equivalent_class) and (not (pair[1] in equivalent_class)):\n equivalent_class[pair[1]] = equivalent_class[pair[0]]\n if (not(pair[0] i... | [
"0.5669883",
"0.5552854",
"0.5449721",
"0.5332555",
"0.5325522",
"0.53195566",
"0.53147596",
"0.529187",
"0.5286526",
"0.5284268",
"0.52787423",
"0.52514434",
"0.5224838",
"0.5221005",
"0.522084",
"0.5210592",
"0.52059454",
"0.52004546",
"0.51959074",
"0.51881945",
"0.5165687... | 0.61218315 | 0 |
Obtain this finite field's `prime` number. | def get_prime(self):
return self.prime | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_prime(self):\n if(not self._constructed): raise EGCSUnconstructedStateError()\n return self._prime",
"def Em_prime(self):\n delta_electrons = self._GetElectronDiff()\n assert delta_electrons != 0\n return - self.DeltaGmPrime() / (constants.F*delta_electrons)",
"def E_... | [
"0.71399903",
"0.66589963",
"0.6651876",
"0.6646228",
"0.662972",
"0.647131",
"0.6122692",
"0.6115008",
"0.6047143",
"0.60251045",
"0.6022619",
"0.5988457",
"0.59664625",
"0.596395",
"0.5935993",
"0.5896268",
"0.58959156",
"0.58298343",
"0.5828062",
"0.5750545",
"0.5699855",
... | 0.7992657 | 0 |
Returns True if i is a leaf, i.e. if i has no children. | def is_leaf(self, i):
return len(self.children[i]) == 0 or len(self.pq[i]) == 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_leaf(self):\n if len(self.children) == 0:\n return True\n else:\n return False",
"def is_leaf(self):\r\n return self.num_children() == 0",
"def is_leaf(self):\n return len(self.children) == 0",
"def is_leaf(self):\n return len(self.child_list) =... | [
"0.82774425",
"0.8232412",
"0.8213968",
"0.8201722",
"0.8164184",
"0.81311023",
"0.81169236",
"0.8108716",
"0.8108716",
"0.8108716",
"0.8108716",
"0.80393773",
"0.80339175",
"0.7978852",
"0.797204",
"0.79555184",
"0.79298913",
"0.78709584",
"0.78234917",
"0.7776668",
"0.77559... | 0.8860917 | 0 |
Gives the children of node i, which has elements elems. In this version, it computes all 2-partitions if they are not already cached and stores them in children[i]. | def get_children(self, i, elems):
# if len(elems) == 1:
# return []
# elif self.explored[i]:
# return self.children[i]
# else:
self.children[i], self.children_elems[i] = self._get_children(list(elems)) # all_two_partitions(list(elems))
# self.update_... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def children(self, i):\n if i < 0:\n raise IndexError()\n return self._children[i]",
"def get_children_elements(self):\n\n pass",
"def children_recursive(self, i):\n result = []\n for child in self.children(i):\n result += [child] + self.children_recursi... | [
"0.63445675",
"0.60507435",
"0.59418297",
"0.5931057",
"0.5728822",
"0.5700945",
"0.56202734",
"0.5581891",
"0.55794436",
"0.55772966",
"0.55548924",
"0.5552774",
"0.5548988",
"0.5543629",
"0.5530173",
"0.551414",
"0.55022126",
"0.5479672",
"0.54726505",
"0.5466317",
"0.54577... | 0.81951326 | 0 |
Get the node corresponding to the given elements, creating a new id if needed. | def record_node(self, elements: frozenset) -> int:
logging.debug('get node id from elements %s', str(elements))
if elements not in self.elems2node:
logging.debug('get node id from elements %s. new node! %s', str(elements), self.next_id)
logging.debug('Clusters =%s ', str(self.clu... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getNodeById(self, nodes, id):\n for item in nodes:\n if item.getProperty('id') == id:\n return item",
"def create_id(elements: Iterable) -> str:\r\n i = 1\r\n while str(i) in elements:\r\n i += 1\r\n return str(i)",
"def update_node_id(node: Element) -> None... | [
"0.5942844",
"0.58788836",
"0.58618236",
"0.55669534",
"0.55313444",
"0.54860556",
"0.5439955",
"0.54111147",
"0.53839993",
"0.53363097",
"0.533143",
"0.53260684",
"0.53103817",
"0.53013533",
"0.5292114",
"0.5276335",
"0.5268265",
"0.5268265",
"0.5268265",
"0.5268265",
"0.526... | 0.6063719 | 0 |
Push RSPECs to Jira | def push_rspecs(host, auth, rspecs):
for rspec in rspecs:
description = rspec["fields"]["description"]
click.echo(f"Pushing {rspec['key']} ", err=True)
data = {
"update": {
"description": [
{
"set": description
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def push_current_data(project):\n defects = []\n\n logger.info(\"Starting {}...\".format(project))\n jira_issues = get_jira_defects(project)\n now = datetime.datetime.utcnow().strftime(DATE_FORMAT)\n logger.debug(\"Fetched {} issues successfully for {}\".format(len(jira_issues), project))\n\n # ... | [
"0.60630643",
"0.59804326",
"0.5954895",
"0.588658",
"0.5663922",
"0.5642216",
"0.54534817",
"0.54471946",
"0.54439807",
"0.54370046",
"0.5394964",
"0.5368015",
"0.5364517",
"0.53410673",
"0.5320143",
"0.5316345",
"0.5315661",
"0.53115475",
"0.53106874",
"0.5309121",
"0.52791... | 0.7445449 | 0 |
Retrieve metadata describing an arrayset artifact. | def get_model_arrayset_metadata(database, model, aid, arrays=None, statistics=None, unique=None):
if isinstance(arrays, str):
arrays = slycat.hyperchunks.parse(arrays)
if isinstance(statistics, str):
statistics = slycat.hyperchunks.parse(statistics)
if isinstance(unique, str):
unique... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def metadata(self) -> 'outputs.DataCollectionEndpointResponseMetadata':\n return pulumi.get(self, \"metadata\")",
"def get_assets_metadata(self):\n return Metadata(**settings.METADATA['asset_ids'])",
"def GetMetadata(self):\n return self.dict['meta']",
"def _getAllMeta(self):\n try:\n... | [
"0.5965084",
"0.5936909",
"0.57802886",
"0.57577825",
"0.5755986",
"0.5726956",
"0.5722624",
"0.57162297",
"0.57162297",
"0.5708068",
"0.5654824",
"0.5649437",
"0.56223595",
"0.56223595",
"0.56223595",
"0.56223595",
"0.56223595",
"0.56012475",
"0.5600938",
"0.5594341",
"0.558... | 0.64080864 | 0 |
Start a new model array set artifact. | def put_model_arrayset(database, model, aid, input=False):
model = database.get('model',model["_id"])
slycat.web.server.update_model(database, model, message="Starting array set %s." % (aid))
storage = uuid.uuid4().hex
with slycat.web.server.hdf5.lock:
with slycat.web.server.hdf5.create(storage)... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def put_model_array(database, model, aid, array_index, attributes, dimensions):\n slycat.web.server.update_model(database, model, message=\"Starting array set %s array %s.\" % (aid, array_index))\n model = database.get('model', model['_id'])\n storage = model[\"artifact:%s\" % aid]\n with slycat.web.se... | [
"0.61201274",
"0.55832493",
"0.55832493",
"0.55684704",
"0.5436291",
"0.5358201",
"0.5327634",
"0.52918756",
"0.52394766",
"0.5192222",
"0.5192222",
"0.5192222",
"0.5192222",
"0.51882184",
"0.5163489",
"0.5162972",
"0.5125661",
"0.51193255",
"0.50964105",
"0.50704527",
"0.505... | 0.5996218 | 1 |
Write data to an arrayset artifact. | def put_model_arrayset_data(database, model, aid, hyperchunks, data):
# cherrypy.log.error("put_model_arrayset_data called with: {}".format(aid))
if isinstance(hyperchunks, str):
hyperchunks = slycat.hyperchunks.parse(hyperchunks)
data = iter(data)
slycat.web.server.update_model(database, model... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_write_element(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dt = np.dtype('(3,)f8')\n dset = f.create_dataset('x', (10,), dtype=dt)\n\n data = np.array([1, 2, 3.0])\n dset[4] = data\n\n out = dset[4]\n assert np.all(out == data)",
"def test_write_element(self):\n dt ... | [
"0.65863824",
"0.6536039",
"0.62674475",
"0.6248463",
"0.6099719",
"0.6086565",
"0.5947786",
"0.5932743",
"0.5904107",
"0.5831756",
"0.58212835",
"0.5807893",
"0.57833177",
"0.57301813",
"0.566023",
"0.56549037",
"0.5618263",
"0.5617314",
"0.55858254",
"0.5577587",
"0.5571146... | 0.6964377 | 0 |
Delete a model parameter from the CouchDB database | def delete_model_parameter(database, model, aid):
with get_model_lock(model["_id"]):
del model["artifact:%s" % aid]
del model["artifact-types"][aid]
database.save(model) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_field(model, *arg):\n return model._pw_index_.delete_field(*arg)",
"def delete_parameter(request, parameter, **_kwargs):\n pass",
"def obj_delete(self, request=None, **kwargs):\n self.get_collection(request).remove({ \"_id\": ObjectId(kwargs.get(\"pk\")) })",
"async def rm_object(model,... | [
"0.74125355",
"0.7057663",
"0.6858939",
"0.6829538",
"0.6783668",
"0.6773147",
"0.6772022",
"0.6767092",
"0.6750633",
"0.6732334",
"0.67046046",
"0.6679741",
"0.6674734",
"0.6657565",
"0.663638",
"0.66068643",
"0.6587644",
"0.6583732",
"0.6554846",
"0.6554846",
"0.6550237",
... | 0.7517227 | 0 |
Create a cached remote session for the given host. | def create_session(hostname, username, password):
return slycat.web.server.remote.create_session(hostname, username, password, None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_session(self,session_id,host_id,host_name,spotify_token):\n self.sessions[session_id] = {\n \"HOST\" : {\n \"ID\" : host_id,\n \"NAME\" : host_name,\n \"spotify_token\" : spotify_token,\n \"spotify_player\"... | [
"0.60513884",
"0.5854512",
"0.5685533",
"0.56772697",
"0.56223834",
"0.56103456",
"0.55523884",
"0.5550031",
"0.5545152",
"0.55318147",
"0.54500717",
"0.54113406",
"0.5394822",
"0.53833413",
"0.53454465",
"0.5333294",
"0.53289765",
"0.53118473",
"0.5303092",
"0.52960545",
"0.... | 0.6709572 | 0 |
Get the response URL and clean it to make sure that we are not being spoofed | def response_url():
current_url = urlparse(cherrypy.url()) # gets current location on the server
try:
location = cherrypy.request.json["location"]
if parse_qs(urlparse(location['href']).query)['from']: # get from query href
cleaned_url = parse_qs(urlparse(location['href']).query)['... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_long_url(self):\n url = self.cleaned_data.get('long_url')\n headers = getattr(settings, 'DEFLECT_REQUESTS_HEADERS', None)\n timeout = getattr(settings, 'DEFLECT_REQUESTS_TIMEOUT', 3.0)\n try:\n r = requests.get(url, headers=headers, timeout=timeout,\n ... | [
"0.63518",
"0.622883",
"0.6113596",
"0.61127734",
"0.6089494",
"0.60464",
"0.60459745",
"0.60136104",
"0.6003202",
"0.5996835",
"0.5968541",
"0.5964263",
"0.5957325",
"0.5915546",
"0.5894142",
"0.5866399",
"0.58576095",
"0.5803815",
"0.57866335",
"0.5758677",
"0.5738808",
"... | 0.6576252 | 0 |
Try to delete any outdated sessions for the user if they have the cookie for it | def clean_up_old_session(user_name=None):
cherrypy.log.error("cleaning all sessions for %s" % user_name)
if "slycatauth" in cherrypy.request.cookie:
try:
# cherrypy.log.error("found old session trying to delete it ")
sid = cherrypy.request.cookie["slycatauth"].value
c... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def session_gc(session_store):\n if random.random() < 0.001:\n # we keep session one week\n if hasattr(session_store, 'gc'):\n session_store.gc()\n return\n last_week = time.time() - 60*60*24*7\n for fname in os.listdir(session_store.path):\n path = o... | [
"0.7129545",
"0.697994",
"0.672983",
"0.6679393",
"0.66634405",
"0.6570749",
"0.6550736",
"0.6536721",
"0.6528558",
"0.651916",
"0.6491734",
"0.64853835",
"0.6464785",
"0.6463646",
"0.6447692",
"0.6446159",
"0.6430663",
"0.64276487",
"0.64138967",
"0.63970834",
"0.6382262",
... | 0.77049917 | 0 |
Check whether the session user is equal to the Apache user; raise 403 and delete the session if they are not equal | def check_user(session_user, apache_user, sid):
if session_user != apache_user:
cherrypy.log.error("session_user::%s is not equal to apache_user::%s in standard auth"
"deleting session and throwing 403 error to the browser" % (session_user, apache_user))
# force a lock so ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_not_logged_cannot_delete(self):\n\n utils.test_not_logged_cannot_access(self, self.url)",
"def process_request(self, request):\n if request.user.is_authenticated():\n cache = get_cache('default')\n cache_timeout = 86400\n cache_key = \"user_pk_%s_restrict\"... | [
"0.6440854",
"0.64278203",
"0.63911074",
"0.62447345",
"0.6228785",
"0.6101922",
"0.60444987",
"0.60331225",
"0.6016866",
"0.59865224",
"0.5937146",
"0.5915369",
"0.5902086",
"0.58984023",
"0.5892552",
"0.58844024",
"0.58813035",
"0.58813035",
"0.58813035",
"0.58813035",
"0.5... | 0.8276541 | 0 |
Checks that the connection is HTTPS and then returns the user's remote IP | def check_https_get_remote_ip():
if not (cherrypy.request.scheme == "https" or cherrypy.request.headers.get("x-forwarded-proto") == "https"):
cherrypy.log.error("slycat-standard-authentication.py authenticate",
"cherrypy.HTTPError 403 secure connection required.")
rai... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getRemoteHost():",
"def remoteip(self) :\n\t\ttry :\n\t\t\treturn self._remoteip\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_remote_ip(request):\n \n return utilities.get_remote_ip(request)",
"def get_remote_ip(request):\n return request.META.get(\"HTTP_REMOTE_ADDR\", request.META.get(... | [
"0.7072389",
"0.6898136",
"0.6809364",
"0.67104733",
"0.65298426",
"0.6514117",
"0.6488618",
"0.6484981",
"0.6445124",
"0.6399658",
"0.63811284",
"0.6367772",
"0.6339807",
"0.6320289",
"0.6307788",
"0.62999815",
"0.62957853",
"0.6291374",
"0.6265773",
"0.62566173",
"0.6234748... | 0.8518679 | 0 |
Display the original and blurred images | def displayImages(self):
plt.figure(figsize=(8,6))
plt.subplot(1,2,1)
plt.imshow( self.original_image, cmap="gray")
plt.title("Original Image")
plt.subplot(1,2,2)
plt.imshow( self.blurred_image, cmap="gray")
plt.title("Blurred Image") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_image(self):\n cv2.imshow('Image', self.__diff_image())\n cv2.waitKey()",
"def blurImage(self):\n\n print (\"--Blurring Main Image--\")\n self.blurButton.setDown(True)\n im = Image.open(self.ActivePhoto)\n blurred_image = im.filter(ImageFilter.GaussianBlur(1))\n... | [
"0.65680623",
"0.656295",
"0.6562389",
"0.64684945",
"0.64078856",
"0.6385656",
"0.63168657",
"0.6307291",
"0.62678707",
"0.6236219",
"0.6151714",
"0.60914916",
"0.6090772",
"0.608311",
"0.6025253",
"0.5972603",
"0.59622127",
"0.5956339",
"0.5948988",
"0.59457",
"0.59445393",... | 0.82821536 | 0 |
paste a file or directory that has been previously copied | def paste(location):
copyData = settings.getDataFile()
if not location:
location = "."
try:
data = pickle.load(open(copyData, "rb"))
speech.speak("Pasting " + data["copyLocation"] + " to current directory.")
except:
speech.fail("It doesn't look like you've copied anything yet.")
speech.fail("Type 'hallie ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _pasteFile(self) -> None:\n if not self._fileClipboard:\n return\n cut = self._fileClipboard.pop()\n filenames = [x.name for x in self._fileClipboard]\n destPaths = [self._currPath.joinpath(x) for x in filenames]\n try:\n duplicates = []\n for... | [
"0.75675744",
"0.72383505",
"0.69673383",
"0.69241136",
"0.6830915",
"0.6801877",
"0.67643505",
"0.66626996",
"0.6652511",
"0.65969956",
"0.6596979",
"0.64582324",
"0.64567304",
"0.6437227",
"0.6432736",
"0.6422666",
"0.6394669",
"0.6371352",
"0.6369552",
"0.6356414",
"0.6339... | 0.7870862 | 0 |
Display list of bookmarks for any given user | def user_list(request, user_name):
bookmarks = get_list_or_404(Bookmark.objects.all().filter(human__username=user_name))
return render(request, 'urly_bird/any_user_list.html', {'bookmarks': bookmarks}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_bookmark(request):\r\n \r\n if request.method == 'GET':\r\n if request.GET.get('path'):\r\n object_list = BookmarkItem.objects.filter(bookmark__user=request.user).order_by('order')\r\n #print urllib.unquote(request.GET.get('path'))\r\n try:\r\n b... | [
"0.71984094",
"0.7148708",
"0.6793257",
"0.67187834",
"0.6641313",
"0.6623683",
"0.633557",
"0.6249334",
"0.62378967",
"0.6236893",
"0.619135",
"0.61078155",
"0.6044722",
"0.59958416",
"0.5948521",
"0.5945683",
"0.5907882",
"0.5876983",
"0.5854694",
"0.58490324",
"0.58267343"... | 0.7803272 | 0 |
Converts cones in the GUI's frame of reference to cones in the lidar's frame of reference and gets those in the lidar's field of view. Sets detected_cones to only the cones within the lidar's field of view, sorted by angle starting at 135 degrees. | def lidarScan(self):
# Get cones seen by lidar
lidar_coords = []
for point in self.gui_points:
# Convert from gui frame to lidar frame
x = (point[0] - self.lidar_pos[0])*scaling_factor
y = (self.lidar_pos[1] - point[1])*scaling_factor
# Convert po... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rotationDetermination(self):\n \n for index, row in enumerate(self.magdata):\n if index > 11 and index < (len(self.magdata) - 12):\n br1 = [row[0] for row in self.magdata[(index-12):(index-2)]]\n bt1 = [row[1] for row in self.magdata[(index-12):(index-2)]]... | [
"0.5472272",
"0.5462623",
"0.54448014",
"0.5388763",
"0.535019",
"0.53095686",
"0.52818936",
"0.52364",
"0.52262443",
"0.51484215",
"0.51208395",
"0.5072665",
"0.5054126",
"0.49976072",
"0.49777424",
"0.4968893",
"0.4947722",
"0.49220458",
"0.4889088",
"0.4872785",
"0.4870850... | 0.6696489 | 0 |
Add more connection endpoints. Connection may have many endpoints, mixing protocols and types. | def addEndpoints(self, endpoints):
self.endpoints.extend(endpoints)
self._connectOrBind(endpoints) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def connections_endpoints(request: web.BaseRequest):\n context: AdminRequestContext = request[\"context\"]\n connection_id = request.match_info[\"conn_id\"]\n\n profile = context.profile\n connection_mgr = ConnectionManager(profile)\n try:\n endpoints = await connection_mgr.get_endpoint... | [
"0.6581431",
"0.62893325",
"0.6268082",
"0.6254711",
"0.6199637",
"0.6168774",
"0.6061122",
"0.60520715",
"0.6042876",
"0.6022899",
"0.5892089",
"0.5878343",
"0.5851649",
"0.58505166",
"0.58410054",
"0.5814967",
"0.57680947",
"0.5753131",
"0.572161",
"0.57154876",
"0.57008517... | 0.7782683 | 0 |
Read a multipart message in a non-blocking manner; return the ready message or raise an exception if no more messages are available. | def _readMultipart(self):
while True:
self.recv_parts.append(self.socket.recv(constants.NOBLOCK))
if not self.socket_get(constants.RCVMORE):
result, self.recv_parts = self.recv_parts, []
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def doRead(self):\n if self.read_scheduled is not None:\n if not self.read_scheduled.called:\n self.read_scheduled.cancel()\n self.read_scheduled = None\n\n while True:\n if self.factory is None: # disconnected\n return\n\n ev... | [
"0.7202878",
"0.6593352",
"0.6501937",
"0.64703834",
"0.63924545",
"0.6380673",
"0.6375113",
"0.63678604",
"0.6358247",
"0.6302495",
"0.6219391",
"0.6152119",
"0.6034115",
"0.6021782",
"0.6014445",
"0.594972",
"0.59420466",
"0.5936738",
"0.5936738",
"0.59349895",
"0.59290695"... | 0.72168154 | 0 |
Connect and/or bind socket to endpoints. | def _connectOrBind(self, endpoints):
for endpoint in endpoints:
if endpoint.type == ZmqEndpointType.connect:
self.socket.connect(endpoint.address)
elif endpoint.type == ZmqEndpointType.bind:
self.socket.bind(endpoint.address)
else:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bind(self):\n self._conn = socket.socket(socket.AF_INET, self.protocol.value)\n try:\n self._conn.bind((self.host, self.port))\n except OSError as e:\n self.close()\n raise BindError(str(e))\n self._conn.setblocking(False)\n self._conn.listen(... | [
"0.7130075",
"0.7112576",
"0.6801621",
"0.6775071",
"0.6772604",
"0.6653766",
"0.6643562",
"0.66249806",
"0.66118973",
"0.659476",
"0.6579499",
"0.6578894",
"0.6578408",
"0.6554303",
"0.6552846",
"0.654279",
"0.6523097",
"0.651956",
"0.64632034",
"0.6457388",
"0.64059734",
... | 0.7718711 | 0 |
Get a single word's WordNet POS (part-of-speech) tag. | def get_wordnet_pos(self, word):
# token = word_tokenize(word)
base_tag = pos_tag([word])[0][1][:2]
return self.pos_tag_dict.get(base_tag, wordnet.NOUN) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ,\n \"N\": wordnet.NOUN,\n \"V\": wordnet.VERB,\n \"R\": wordnet.ADV}\n return tag_dict.get(tag, wordnet.NOUN)",
"def get_wordnet_pos(word):\n tag = nl... | [
"0.7593354",
"0.7576574",
"0.7576574",
"0.7576574",
"0.7576574",
"0.7576574",
"0.7576574",
"0.75293577",
"0.7503421",
"0.7377293",
"0.719029",
"0.71326035",
"0.71208805",
"0.709108",
"0.70407254",
"0.7016819",
"0.6993947",
"0.69855785",
"0.69850004",
"0.6918058",
"0.6874751",... | 0.7755339 | 0 |
Cleans a single review (simplifies it as much as possible) | def clean_review(self, text):
text = text.lower() # lowercase capital letters
if self.remove_stopwords:
text = self.remove_stopwords_f(text, keep_neg_words=True)
text = re.sub('[^a-zA-Z]+', ' ', text) # select only alphabet characters (letters only)
# text = re.sub('[^a-z... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sanitize(review):\n # c) Remove all punctuation, as well as the stop-words.\n # First replace punctuations with empty char then tokenize it\n # Replace punctuation with spaces using fast method\n clean = review.translate(review.maketrans(string.punctuation,\n ... | [
"0.7039309",
"0.6684278",
"0.64323676",
"0.6006801",
"0.5992633",
"0.5959121",
"0.5953015",
"0.5914954",
"0.58682597",
"0.5802497",
"0.5769421",
"0.5647594",
"0.56341743",
"0.559752",
"0.5589405",
"0.55746025",
"0.5561322",
"0.5524036",
"0.55173266",
"0.5471399",
"0.54688805"... | 0.71801054 | 0 |
Cleans a single resume (resume text) | def clean_resume(self, text):
text = text.lower() # lowercase capital letters
text = re.sub(r'(http|www)\S+\s*', '', text) # remove URLs
text = re.sub(r'\S+@\S+\s*', '', text) # remove emails
text = re.sub(r'@\S+\s*', '', text) # remove mentions
text = re.sub(r'#\S+\s*', '',... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cleaning(full_text):\n try:\n if open(RESULT_PATH):\n os.remove(RESULT_PATH)\n \n else:\n print(\"No output.mp3\")\n except Exception as e:\n print(str(e))\n\n text = full_text\n\n book = ''.join(text)\n\n\n book = book.replace('.', '.<eos>')\n ... | [
"0.65953624",
"0.59690464",
"0.5925192",
"0.57667595",
"0.5754863",
"0.572727",
"0.56936944",
"0.5628845",
"0.5601159",
"0.55797684",
"0.55733097",
"0.55511904",
"0.554485",
"0.552263",
"0.55109733",
"0.5505896",
"0.5464441",
"0.5445689",
"0.54276085",
"0.5422343",
"0.5397728... | 0.7648608 | 0 |
Euclidean distance; the squared Euclidean distance is more frequently used | def euc_dist(self, squared=True):
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getEuclideanDistance():\r\n global euclideanDistance\r\n return euclideanDistance",
"def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.square(np.subtract(x1, x2))))",
"def euclidean_distance(s1,s2): \n tmpsum = 0\n \n for index,value in enumerate(s1):\n tmpsum += (s1[in... | [
"0.73311925",
"0.7217947",
"0.72092783",
"0.71997476",
"0.71303356",
"0.70067096",
"0.6991867",
"0.6981154",
"0.69637036",
"0.6960269",
"0.6941405",
"0.69364357",
"0.6935467",
"0.69090146",
"0.68860257",
"0.68558615",
"0.68158317",
"0.6806122",
"0.6798799",
"0.6798116",
"0.67... | 0.7476734 | 0 |
Test elementwise for fill values and return result as a boolean array. | def isfillvalue(a):
a = numpy.asarray(a)
if a.dtype.kind == 'i':
mask = a == -999999999
elif a.dtype.kind == 'f':
mask = numpy.isnan(a)
elif a.dtype.kind == 'S':
mask = a == ''
else:
raise ValueError('Fill value not known for dtype %s' % a.dtype)
return mask | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_array_booleans(n: int = 1024, random_seed: int = None) -> TYPE_ARRAY:\n return _RNG.randint(0, 2, n).astype(bool)",
"def __call__(self, size=1):\n\n # A completely empty numpy array\n results = numpy.zeros(self.shape, dtype=bool)\n\n # Gets a set of random indices that need t... | [
"0.60772234",
"0.59158",
"0.5880896",
"0.5725829",
"0.5717447",
"0.57084936",
"0.56891406",
"0.5651735",
"0.5627813",
"0.56104505",
"0.5576027",
"0.5572044",
"0.55715203",
"0.5566624",
"0.55537015",
"0.5498187",
"0.5478841",
"0.54585373",
"0.5433937",
"0.5429966",
"0.5427257"... | 0.5984257 | 1 |
Return the start/stop times in milliseconds since 1/1/1970 | def as_millis(self):
return int(ntplib.ntp_to_system_time(self.start) * 1000), int(ntplib.ntp_to_system_time(self.stop) * 1000) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def runtime_cal(start,end) :\n run_time = end - start\n mm = int(run_time/60)\n ss = round(run_time%60)\n return mm, ss",
"def get_time_ms():\n return int(round(time.time() * 1000))",
"def getTimes():",
"def getTimes():",
"def getTimes():",
"def elapsed_micros(start: int, /) -> int:",
"d... | [
"0.6986291",
"0.6958346",
"0.69455504",
"0.69455504",
"0.69455504",
"0.69351584",
"0.6922508",
"0.6904034",
"0.69000614",
"0.6889413",
"0.6834017",
"0.6818947",
"0.6816358",
"0.67783904",
"0.67711294",
"0.67618895",
"0.67332286",
"0.6714469",
"0.6713183",
"0.6713183",
"0.6713... | 0.73437476 | 0 |
Function to recursively check if two dicts are equal | def dict_equal(d1, d2):
if isinstance(d1, dict) and isinstance(d2, dict):
# check keysets
if set(d1) != set(d2):
return False
# otherwise loop through all the keys and check if the dicts and items are equal
return all((dict_equal(d1[key], d2[key]) for key in d1))
# ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dict_equal(d1: Dict, d2: Dict) -> bool:\n\n # iterate over the dict with more keys\n # di is the dictionary to iterate over\n # dj is the one to compare to\n if len(d2) > len(d1):\n di = d2\n dj = d1\n else:\n di = d1\n dj = d2\n for key, value in di.items():\n ... | [
"0.82100755",
"0.7669566",
"0.7605134",
"0.7587031",
"0.7573984",
"0.7369974",
"0.735764",
"0.72046685",
"0.71447515",
"0.70782727",
"0.70460093",
"0.69822705",
"0.6968151",
"0.69324183",
"0.69310194",
"0.69286764",
"0.6905533",
"0.6891169",
"0.6882714",
"0.6858757",
"0.68475... | 0.7747674 | 1 |
Quantify misfit with some example data | def test_default_quantify_misfit(tmpdir):
preprocess = Default(syn_data_format="ascii", obs_data_format="ascii",
unit_output="disp", misfit="waveform",
adjoint="waveform", path_preprocess=tmpdir,
path_solver=TEST_SOLVER, source_prefix="SOURC... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_genextreme_fit(self):\n p = generic.fit(self.genextreme, \"genextreme\")\n np.testing.assert_allclose(p, (0.20949, 297.954091, 75.7911863), 1e-5)",
"def fit(self, X):",
"def test_fit(self):\n X = np.zeros((2, 3), dtype=np.float64)\n snv = SNV(q=50)\n try:\n ... | [
"0.62091",
"0.6153025",
"0.61098146",
"0.6083619",
"0.6002775",
"0.5955667",
"0.59443253",
"0.5894886",
"0.58396775",
"0.57995504",
"0.5769619",
"0.57624996",
"0.5748213",
"0.5731888",
"0.5731888",
"0.5731888",
"0.57293093",
"0.57270885",
"0.57270885",
"0.56752056",
"0.567476... | 0.6377925 | 0 |
Test that the Pyaflowa preprocess class can quantify misfit over the course of a few evaluations (a line search) and run its finalization task. Essentially an integration test checking that the entire preprocessing module works as a whole. | def test_pyaflowa_line_search(tmpdir):
pyaflowa = Pyaflowa(
workdir=tmpdir,
path_specfem_data=os.path.join(TEST_SOLVER, "mainsolver", "DATA"),
path_output=os.path.join(tmpdir, "output"),
path_solver=TEST_SOLVER, source_prefix="SOURCE", ntask=2,
data_case="synthetic", componen... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_predictor():",
"def test_active_inference_SPM_1b(self):",
"def test_preprocess(self):\r\n\r\n # Should discard all reads due to sequence length being too short\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sa... | [
"0.629093",
"0.62761736",
"0.620866",
"0.6105234",
"0.6059339",
"0.6007366",
"0.5925567",
"0.59123164",
"0.5886541",
"0.58703196",
"0.58566153",
"0.5794662",
"0.57678074",
"0.5767124",
"0.5744852",
"0.5740845",
"0.57300746",
"0.5707568",
"0.5697885",
"0.5695476",
"0.56930786"... | 0.6344366 | 0 |
dataList item renderer for Posts on the Bulletin Board. | def cms_post_list_layout(list_id, item_id, resource, rfields, record):
record_id = record["cms_post.id"]
#item_class = "thumbnail"
T = current.T
db = current.db
s3db = current.s3db
settings = current.deployment_settings
permit = current.auth.s3_has_permission
raw = record._row
dat... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def serializePostsData(influencer, posts, length_limit=30, highlight=False):\n from debra import serializers\n\n posts_data = []\n urls = set()\n posts = list(posts)\n dated = []\n undated = []\n for post in posts:\n if post.create_date:\n dated.append(post)\n else:\n ... | [
"0.5797765",
"0.5777556",
"0.569845",
"0.5545204",
"0.5486257",
"0.54002476",
"0.5336475",
"0.5334056",
"0.5324463",
"0.5311351",
"0.5307505",
"0.53025407",
"0.52784413",
"0.5277002",
"0.52619624",
"0.52491385",
"0.5233054",
"0.52293384",
"0.5213067",
"0.5183145",
"0.51277083... | 0.5974937 | 0 |
Count need lines per district and status (top 5 districts) for all open Events | def needs_by_district(cls):
T = current.T
db = current.db
s3db = current.s3db
table = s3db.need_line
ntable = s3db.need_need
etable = s3db.event_event
ltable = s3db.event_event_need
status = table.status
number = table.id.count()
locatio... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def needs_by_district(cls):\n\n T = current.T\n\n db = current.db\n s3db = current.s3db\n\n table = s3db.req_need_line\n ntable = s3db.req_need\n\n left = ntable.on(ntable.id == table.need_id)\n\n status = table.status\n number = table.id.count()\n loc... | [
"0.5754938",
"0.551308",
"0.54789215",
"0.5473855",
"0.5299601",
"0.5225415",
"0.519377",
"0.5175258",
"0.50648946",
"0.50605243",
"0.50548464",
"0.5018038",
"0.5015188",
"0.5013341",
"0.50113225",
"0.5009056",
"0.49946627",
"0.49912676",
"0.49895993",
"0.49847665",
"0.497986... | 0.63438565 | 0 |
Write the design to the Specctra format | def write(self, design, filename):
self._convert(design)
with open(filename, "w") as f:
f.write(self._to_string(self.pcb.compose())) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_to_fits(self, filename, comment=None, overwrite = False):\n\n\n hdu = fits.PrimaryHDU(self.flux)\n hdu.header = self.header\n\n # Update header information\n crval = self.dispersion[0]\n cd = self.dispersion[1]-self.dispersion[0]\n crpix = 1\n\n hdu.header... | [
"0.6008693",
"0.5994822",
"0.59387493",
"0.5817487",
"0.5813526",
"0.58061814",
"0.5734599",
"0.5709072",
"0.56894326",
"0.56894326",
"0.5635465",
"0.56147325",
"0.56106025",
"0.5595291",
"0.55891746",
"0.55859977",
"0.5578043",
"0.55598426",
"0.55499566",
"0.55450016",
"0.55... | 0.6873917 | 0 |
Convert a pin into an outline | def _convert_pin_to_outline(self, pin):
pcbshape = specctraobj.Path()
pcbshape.layer_id = 'Front'
pcbshape.aperture_width = self._from_pixels(1)
pcbshape.vertex.append(self._from_pixels((pin.p1.x, pin.p1.y)))
pcbshape.vertex.append(self._from_pixels((pin.p2.x, pin.p2.y)))
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_pin(self, pin, xform):\n # TODO special pin characteristics (inverted, clock)?\n line = [xform.chain(p) for p in (pin.p1, pin.p2)]\n self.canvas.line([(p.x, p.y) for p in line],\n fill=self.options.style['part'])",
"def draw_pins():\n\n pass",
"def add_o... | [
"0.6245729",
"0.6115153",
"0.5688149",
"0.54557323",
"0.5455196",
"0.5401273",
"0.53704077",
"0.5337081",
"0.53153765",
"0.53109276",
"0.52516943",
"0.5228548",
"0.5222575",
"0.51996744",
"0.5138629",
"0.5120291",
"0.5064127",
"0.5062142",
"0.5055162",
"0.504735",
"0.5032255"... | 0.84876585 | 0 |
Convert points to paths | def _points_to_paths(self, points):
prev = points[0]
result = []
for point in points[1:]:
path = specctraobj.Path()
path.aperture_width = self._from_pixels(1)
path.vertex.append(prev)
path.vertex.append(point)
result.append(path)
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def full_path_to_points(path):\n\n points_x = [path[0][0]]\n points_y = [path[1][0]]\n\n new_path = path\n prev_turn, new_path = path_to_command_thymio(new_path)\n\n for i in range(len(new_path[0]) - 1):\n\n new_turn, new_path = path_to_command_thymio(new_path)\n\n if new_turn != prev_... | [
"0.66994214",
"0.65940034",
"0.6466729",
"0.6385552",
"0.6291015",
"0.61070466",
"0.6079898",
"0.6022747",
"0.59725976",
"0.59154207",
"0.59085953",
"0.58816546",
"0.57742137",
"0.5762992",
"0.5753632",
"0.57504135",
"0.57386243",
"0.57256794",
"0.5717574",
"0.5676891",
"0.56... | 0.83508885 | 0 |
Returns the metric used in the search | def metric(self):
return self.__metric | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def metric(self):\n return self._metric",
"def metric(self) -> str:\r\n return self._metric",
"def metric(self):\n\n if not self._metric_cache:\n # Select an appropriate statistic\n cls = utils.import_class_or_module(self._metric)\n self._metric_cache = cls... | [
"0.73544544",
"0.7343437",
"0.7094367",
"0.6804628",
"0.67208123",
"0.6674424",
"0.65164226",
"0.6417011",
"0.6387913",
"0.63811266",
"0.6376277",
"0.6376277",
"0.6359819",
"0.63471746",
"0.63471746",
"0.63350976",
"0.63268703",
"0.63268703",
"0.63268703",
"0.63268703",
"0.63... | 0.74399334 | 0 |
Select an account and set it as the current 'working' account. Calling this method also clears the Batch Queue, if it isn't empty. | def SelectAccount(self, nickname):
self.ClearBatchQueue()
if nickname in self.accounts:
self.current_account = self.accounts[nickname]
self.client = self.current_account.client
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account):\n\n self._account = account",
"def account(self, account: str):\n s... | [
"0.5898519",
"0.5898519",
"0.5898519",
"0.5898519",
"0.58418894",
"0.5787817",
"0.5739551",
"0.57178605",
"0.5570972",
"0.55681026",
"0.55255353",
"0.54933035",
"0.5438046",
"0.54226345",
"0.5285505",
"0.52594006",
"0.52074546",
"0.5121794",
"0.5118399",
"0.50960463",
"0.5094... | 0.6025916 | 0 |
Clear the batch queue | def ClearBatchQueue(self):
self.batch_queue = gdata.contacts.data.ContactsFeed() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clearQueueAll():",
"def clear_queue(self):\n self.queue = deque()",
"def clear(self):\n self.queue.clear()",
"def clear_queue(self):\n while not self.queue.empty():\n self.queue.get()",
"def clear(self):\n self.queue = Queue()",
"def clear_batch(self):\n ... | [
"0.8027347",
"0.79099566",
"0.7851463",
"0.780406",
"0.7732012",
"0.75668514",
"0.7311712",
"0.721093",
"0.721093",
"0.721093",
"0.7194719",
"0.7069694",
"0.70656955",
"0.69665104",
"0.6955623",
"0.6941233",
"0.68873274",
"0.6882734",
"0.68424505",
"0.68145674",
"0.6807092",
... | 0.8334762 | 0 |
Lazily get the first contact group's Atom Id | def GetFirstGroupId(self):
return self.client.GetGroups().entry[0].id.text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def get_contact_group(dbcon: DBConnection, id: int) -> Any: # Use any because optional returns suck.\n q = \"\"\"select id, name, active from contact_groups where id=%s\"\"\"\n row = await dbcon.fetch_row(q, (id,))\n contact = None\n if row:\n contact = object_models.ContactGroup(*row)\n ... | [
"0.6018908",
"0.5888721",
"0.5825256",
"0.57383466",
"0.56857145",
"0.54320073",
"0.54071623",
"0.538237",
"0.5367063",
"0.5323985",
"0.52931386",
"0.5278044",
"0.52581567",
"0.52283746",
"0.52128196",
"0.51881367",
"0.51880187",
"0.51782674",
"0.51699287",
"0.5155815",
"0.51... | 0.6422962 | 0 |
Remove a contact from the selected account | def RemoveContact(self, contact):
self.client.Delete(contact) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_contact(self):\n contact_mob_num = input(\"-=\" * 30 + \"\\n\" + \"Please enter contact's mobile number to be removed: \")\n contact = self.auth.get_users_by_MobNum(contact_mob_num)\n if (not contact) or contact not in self._user.contacts:\n print('This user not in your c... | [
"0.7927598",
"0.78937054",
"0.76196754",
"0.760548",
"0.7403511",
"0.7339263",
"0.7177775",
"0.71389616",
"0.69924855",
"0.6943864",
"0.68841195",
"0.681138",
"0.6772057",
"0.67245716",
"0.66446775",
"0.6630713",
"0.66240466",
"0.65467685",
"0.653159",
"0.64608634",
"0.642118... | 0.82682854 | 0 |
Remove all contacts from the selected account | def RemoveAll(self):
contacts = self.GetContactList()
for contact in contacts:
self.BatchEnqueue('delete', contact)
self.ExecuteBatchQueue() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_contacts(self):\n self.db.delete_all_contacts()\n return self.update_contacts()",
"def del_contact_all(self):\n\n send_key(KEY_MENU)\n delstr = contact.get_value('contact_delete')\n if search_text(delstr):\n click_textview_by_text(delstr)\n clic... | [
"0.7838508",
"0.73882663",
"0.71244544",
"0.6743833",
"0.6735509",
"0.66522604",
"0.6644226",
"0.65938866",
"0.6535013",
"0.6475446",
"0.63914645",
"0.62559044",
"0.6243891",
"0.6176765",
"0.610713",
"0.6046226",
"0.60396665",
"0.60009325",
"0.598013",
"0.5964348",
"0.5959389... | 0.7585864 | 1 |
Copy all contacts from one account to another. This method does not check for duplicates. | def CopyContacts(self, from_nickname, to_nickname):
self.SelectAccount(from_nickname)
contacts = self.GetContactList()
self.SelectAccount(to_nickname)
for contact in contacts:
self.BatchEnqueue('create', contact)
self.ExecuteBatchQueue() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def MultiWaySync(self, accounts):\n\t\tcleaned_contacts = []\n\t\tcontacts = []\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tcontacts.extend(self.GetContactList())\n\t\t\n\t\tduplicates, originals = ceFindDuplicates(contacts)\n\t\tmerged, todelete = ceMergeDuplicates(duplicates)\n\... | [
"0.6812184",
"0.6329817",
"0.5751867",
"0.5707562",
"0.5707562",
"0.5586134",
"0.5383878",
"0.5352327",
"0.5340893",
"0.53407866",
"0.53282636",
"0.53140664",
"0.5285255",
"0.5284708",
"0.52719766",
"0.52107036",
"0.52066034",
"0.51984245",
"0.51651853",
"0.5159195",
"0.51517... | 0.756032 | 0 |
Move all contacts from one account to another. This method does not check for duplicates. | def MoveContacts(self, from_nickname, to_nickname):
self.SelectAccount(from_nickname)
contacts = self.GetContactList()
# Copy contacts -before- deleting
self.SelectAccount(to_nickname)
for contact in contacts:
self.BatchEnqueue('create', contact)
self.ExecuteBatchQueue()
# Then delete
self.Sele... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def MultiWaySync(self, accounts):\n\t\tcleaned_contacts = []\n\t\tcontacts = []\n\t\t\n\t\tfor account in accounts:\n\t\t\tself.SelectAccount(account)\n\t\t\tcontacts.extend(self.GetContactList())\n\t\t\n\t\tduplicates, originals = ceFindDuplicates(contacts)\n\t\tmerged, todelete = ceMergeDuplicates(duplicates)\n\... | [
"0.69824326",
"0.6433265",
"0.57518244",
"0.56264514",
"0.56264514",
"0.5607597",
"0.5603125",
"0.55658954",
"0.5536112",
"0.5534724",
"0.54958487",
"0.5490326",
"0.5480002",
"0.5392368",
"0.53907484",
"0.5382409",
"0.53588146",
"0.5324467",
"0.5263877",
"0.52340406",
"0.5210... | 0.72994787 | 0 |
Perform a multiway sync between given accounts | def MultiWaySync(self, accounts):
cleaned_contacts = []
contacts = []
for account in accounts:
self.SelectAccount(account)
contacts.extend(self.GetContactList())
duplicates, originals = ceFindDuplicates(contacts)
merged, todelete = ceMergeDuplicates(duplicates)
cleaned_contacts.extend(origina... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sync_nas(self, users_from_db: Iterator):",
"def synch_all(cls, account, type, filter=None, *args):\n for repo_data in repositories(account, type, filter):\n repo = cls(repo_data)\n repo.synch(*args)",
"def sync(self, sync_from, sync_to, **kwargs):\n return self.exec_comm... | [
"0.6455885",
"0.6311536",
"0.6180077",
"0.61477345",
"0.6121937",
"0.6006474",
"0.58930635",
"0.5883944",
"0.5810142",
"0.5777375",
"0.5727696",
"0.57057714",
"0.56830674",
"0.5662616",
"0.5652426",
"0.564118",
"0.5630871",
"0.56220114",
"0.5602681",
"0.55613834",
"0.55493385... | 0.7644102 | 0 |