| query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string) | document_rank (2 classes) |
|---|---|---|---|---|---|---|
Get the deb822 class to use based on obj | def get_deb822_cls(obj):
if isinstance(obj, basestring):
key = obj.split('/')[-1][:-len('.gz')]
elif isinstance(obj, dict):
# NOTE: Support a resource object
if 'type' in obj:
key = obj['type'].title()
else:
for map_key, cls_key in KEY_TO_NAME:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_obj_class(self, obj_type: str) -> Type[TgnObject]:\n pass",
"def get_message(obj):\n if isinstance(obj, email.Message.Message):\n return obj\n if hasattr(obj, \"read\"):\n obj = obj.read()\n try:\n msg = email.message_from_string(obj)\n except email.Errors.MessageP... | [
"0.60468835",
"0.5737325",
"0.5626973",
"0.5541262",
"0.5488912",
"0.5470521",
"0.53820634",
"0.5379896",
"0.535658",
"0.5270914",
"0.52564365",
"0.52383137",
"0.52213126",
"0.5215957",
"0.51678175",
"0.51556087",
"0.5125791",
"0.5117053",
"0.5051719",
"0.504959",
"0.5039547"... | 0.82478124 | 0 |
Get the indexes that represent this Distribution from the underlying Components | def get_indexes(self):
indexes = []
for c in self.components:
indexes.extend(c.get_indexes())
return indexes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def indices(self) -> np.ndarray:\n return self.impl.indices",
"def indices(self):\n return self.index.indices",
"def get_indices(self):\r\n return self._indices",
"def getIndices(self):\r\n return self._indices",
"def index(self):\n return self.data.index.values",
"def ... | [
"0.79678136",
"0.78836995",
"0.77245635",
"0.7669747",
"0.7342353",
"0.73129576",
"0.7257382",
"0.7190922",
"0.69870543",
"0.69729394",
"0.68934375",
"0.6891254",
"0.6873979",
"0.6869683",
"0.6851193",
"0.6825982",
"0.6809999",
"0.6809096",
"0.6807874",
"0.67803395",
"0.67631... | 0.8461544 | 0 |
Add a package to a component | def add_package(self, component_name, package):
component = self.get_component(component_name)
component.add_package(package) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_package(self, package):\n obj = package if isinstance(package, Package) else Package(\n component=self, **package)\n self.data['packages'].append(obj)",
"def add_packages(self, component_name, packages):\n for pkg in packages:\n self.add_package(component_name, ... | [
"0.74583834",
"0.69385165",
"0.6926181",
"0.6425457",
"0.64241457",
"0.637839",
"0.6228563",
"0.62157047",
"0.6180625",
"0.6137637",
"0.6119172",
"0.6106267",
"0.60927397",
"0.60533196",
"0.6002774",
"0.5973729",
"0.5969399",
"0.59233946",
"0.5900656",
"0.58961207",
"0.585656... | 0.85479677 | 0 |
Add multiple Packages to a component | def add_packages(self, component_name, packages):
for pkg in packages:
self.add_package(component_name, pkg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_packages(self, packages):\n for p in packages:\n self.add_package(p)",
"def update(self, iterable):\n for package in iterable:\n self.add_package(package)",
"def register_packages(self, module_name, extra_package):\n self.creator_manager.register_packages(modu... | [
"0.73213696",
"0.7041433",
"0.6733623",
"0.66976285",
"0.64817804",
"0.6370093",
"0.63610965",
"0.6145575",
"0.60475343",
"0.59889007",
"0.59589255",
"0.5940141",
"0.5892097",
"0.5846863",
"0.57817626",
"0.5773263",
"0.57322484",
"0.56881267",
"0.56647044",
"0.5653178",
"0.56... | 0.7994629 | 0 |
Adds a package to this Component | def add_package(self, package):
obj = package if isinstance(package, Package) else Package(
component=self, **package)
self.data['packages'].append(obj) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_package(self, component_name, package):\n component = self.get_component(component_name)\n component.add_package(package)",
"def add_package ( self, package_info, addition_control, **pkg_add_kw ):\n return self._get_package_dir ( package_info ['name'] ).add_package (\n package_... | [
"0.8857589",
"0.74427915",
"0.70480454",
"0.7036367",
"0.69797033",
"0.69747174",
"0.6825071",
"0.6796833",
"0.6796833",
"0.6756401",
"0.67164165",
"0.6667246",
"0.6634431",
"0.66323304",
"0.6548047",
"0.65459543",
"0.6545156",
"0.6496167",
"0.6432102",
"0.6419851",
"0.641108... | 0.83141875 | 1 |
Add a list of packages | def add_packages(self, packages):
for p in packages:
self.add_package(p) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_packages(self, component_name, packages):\n for pkg in packages:\n self.add_package(component_name, pkg)",
"def append_packages(self, packages: Sequence['Package']):\n for package in packages:\n if package.name in self.all_packages_dict:\n logging.error(... | [
"0.7967399",
"0.71899575",
"0.6881099",
"0.67112195",
"0.66414106",
"0.661952",
"0.660106",
"0.65925723",
"0.6552438",
"0.6526353",
"0.646941",
"0.6427331",
"0.6338769",
"0.6301947",
"0.6284635",
"0.6241945",
"0.6221222",
"0.6174528",
"0.6141783",
"0.6097224",
"0.60807425",
... | 0.8461527 | 0 |
Update from a list of indexes | def update_from_indexes(self, data, **kw):
for i in data:
self.update_from_index(i, **kw) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_by_index(df, col, indexs, data):\n for indx in indexs:\n df.loc[indx, col] = data",
"def index_update(tensor, indices, values):\n tensor[indices] = values\n return tensor",
"def _update_in_db(db_values: list, sheet_cells: List[Cell], indices: Iterable[int]) -> List[in... | [
"0.6944268",
"0.68279725",
"0.674803",
"0.6747745",
"0.67436105",
"0.66381496",
"0.66381496",
"0.65306586",
"0.634002",
"0.6184385",
"0.61832535",
"0.61691165",
"0.6095535",
"0.60420585",
"0.60305125",
"0.6021017",
"0.59819454",
"0.59468365",
"0.5943627",
"0.59080184",
"0.590... | 0.80606335 | 0 |
Updates this metadata instance with packages found in the given JSON document. This can be called multiple times to merge multiple repository metadata JSON documents into this instance. | def update_from_json(self, json_string):
parsed = json.loads(json_string)
self.add_packages(parsed.pop('packages', []))
self.data(parsed) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_package(self, **kwargs):\n logging.warning('Updating a package removes all existing data. '\n 'If you wish to keep the existing data, use `CachedCKAN.patch_package`.')\n results = self.api.action.package_update(**kwargs)\n self.get_ckan_metadata(True)\n ... | [
"0.6071355",
"0.5914813",
"0.5828914",
"0.5709145",
"0.5708369",
"0.55982107",
"0.5593583",
"0.552553",
"0.5517733",
"0.5470216",
"0.5419332",
"0.53404135",
"0.53404135",
"0.53109956",
"0.52079356",
"0.5178757",
"0.5161923",
"0.5106",
"0.5101378",
"0.50850755",
"0.504053",
... | 0.6851507 | 0 |
Get a list of package resources | def get_package_resources(self):
resources = []
for pkg in self.packages:
resource_data = self.get_resource_data()
resources.extend(pkg.get_resources(resource_data))
return resources | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resources(self):\n res = []\n for resource in self._resources:\n res = res + resource.resources()\n\n return res",
"def resources(self):\n res = []\n for resource in self._resources:\n res = res + resource.resources()\n\n return res",
"def res... | [
"0.7528305",
"0.7528305",
"0.74842477",
"0.7477079",
"0.7378839",
"0.73766977",
"0.73312473",
"0.7212838",
"0.7077163",
"0.70177805",
"0.70177805",
"0.70177805",
"0.68670994",
"0.68647087",
"0.6861097",
"0.6786635",
"0.6760258",
"0.6729682",
"0.6708055",
"0.6708055",
"0.67080... | 0.82839036 | 0 |
Returns the unit key for this package that will uniquely identify it in Pulp. This is the unique key for the inventoried package in Pulp. | def unit_key(self):
data = self.to_dict()
return self.generate_unit_key(*[data[key] for key in UNIT_KEYS]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return pulumi.get(self, \"key\")",
"def key(self) -> str:\n return p... | [
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"0.6798424",
"... | 0.789188 | 0 |
Returns all non-unit-key metadata that should be stored in Pulp for this package. This is how the package will be inventoried in Pulp. | def unit_metadata(self):
data = self.to_dict()
metadata = [(k, v) for k, v in data.items() if k not in UNIT_KEYS]
return metadata | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def provideExpectedMetaKeys(self):\n return self.metadataKeys, self.metadataParams",
"def keys(self):\n return ['title', 'keywords', 'description', 'url', 'content_file',\n 'language', 'phone', 'email']",
"def get_extra_metadata_keys() -> list[str]:\n keys = [\n \"srow_x\",\n... | [
"0.6587527",
"0.64940816",
"0.645989",
"0.6420536",
"0.63525313",
"0.62840426",
"0.628037",
"0.62474185",
"0.6222178",
"0.6196326",
"0.61875063",
"0.6166716",
"0.6148108",
"0.6105044",
"0.6097268",
"0.60649985",
"0.60649985",
"0.6063789",
"0.6055105",
"0.6055105",
"0.6055105"... | 0.68912286 | 0 |
Construct a relative path based on our own data. | def relative_path(self, data=None):
path_data = data.copy()
for i in ['prefix', 'source_name']:
if not i in path_data:
path_data[i] = getattr(self, i)
return constants.DEB_FILENAME % path_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_relative_path(full_path, prefix='/', split_on='/data/'):\n splits = full_path.split(split_on)\n return os.path.join(prefix, split_on, splits[-1])",
"def dataPath(relative):\n return os.path.join(_dataDir, relative)",
"def get_relative_path(self):\n if self.dip or self.sip or self.repl... | [
"0.7208581",
"0.70153475",
"0.683594",
"0.6769534",
"0.6621445",
"0.6615474",
"0.6516488",
"0.65120804",
"0.6495915",
"0.64829516",
"0.64696205",
"0.64535296",
"0.6452572",
"0.64190155",
"0.6400882",
"0.63507867",
"0.6273181",
"0.6255719",
"0.62451476",
"0.6235075",
"0.622568... | 0.7555825 | 0 |
Replace invalid characters for an Excel sheet name within the ``sheet_name`` with the ``replacement_text``. | def sanitize_excel_sheet_name(sheet_name: str, replacement_text: str = "") -> str:
try:
unicode_sheet_name = _preprocess(sheet_name)
except AttributeError as e:
raise ValueError(e)
modify_sheet_name = __RE_INVALID_EXCEL_SHEET_NAME.sub(replacement_text, unicode_sheet_name)
return modif... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sheet_name(self, name):\n if self.sheet:\n self._newline\n self._cell('')\n\n self._cell(name)\n self.sheet = name",
"def regional_sheet_name(df_column_name):\n\n sheet_name = df_column_name.title().replace('_', '-').replace('-Km2', '-km^2')\n\n # these region... | [
"0.62368625",
"0.5658766",
"0.55839145",
"0.5342836",
"0.52264005",
"0.5170571",
"0.5117008",
"0.49473116",
"0.4930441",
"0.4916277",
"0.49059415",
"0.48987168",
"0.4875026",
"0.4849753",
"0.4812612",
"0.4810664",
"0.47979686",
"0.47912505",
"0.47768623",
"0.4770285",
"0.4766... | 0.85612595 | 0 |
Returns the entropy of the given target column over the entire DataFrame. | def get_entropy_df(self, df=None):
if df is None:
df = self.df
target = self.target
entropy = 0
values = df[target].unique()
for value in values:
# Fraction of values of 'value' in target feature
fraction = df[target].value_counts()[value]/le... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def entropy(target_col):\n elements,counts = np.unique(target_col,return_counts = True)\n entropy = np.sum([(-counts[i]/np.sum(counts))*np.log2(counts[i]/np.sum(counts)) for i in range(len(elements))])\n return entropy",
"def entropy(self, dataset, target_attr):\n freq = {} #A dictionary to count... | [
"0.77557856",
"0.7240878",
"0.716849",
"0.7106271",
"0.7048393",
"0.69316816",
"0.65471375",
"0.6519684",
"0.64955693",
"0.6444423",
"0.64319384",
"0.6407732",
"0.63241047",
"0.630956",
"0.62471724",
"0.623521",
"0.6202024",
"0.6142288",
"0.6140276",
"0.6101606",
"0.6101134",... | 0.8257584 | 0 |
Returns the entropy of the given feature with respect to the given target. | def get_entropy_feature(self, feature, df=None):
if df is None:
df = self.df
target = self.target
target_variables = df[target].unique()
variables = df[feature].unique()
entropy = 0
# Aggregate entropy for each unique value in 'feature' feature on each uniqu... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __entropy(self, data_set, target_feature):\n frequencies = self.__calculate_frequency(data_set, target_feature)\n feature_entropy = 0.0\n number_of_values = len(data_set)\n\n # Add entropy for each value in frequencies.\n for frequency in frequencies:\n probability... | [
"0.80103743",
"0.72125024",
"0.6683741",
"0.6581703",
"0.64769596",
"0.6355471",
"0.6313559",
"0.63128346",
"0.6207924",
"0.61398417",
"0.6134875",
"0.6108048",
"0.6105479",
"0.6069138",
"0.60679644",
"0.60459185",
"0.6013914",
"0.60018563",
"0.59844184",
"0.59781724",
"0.594... | 0.7893348 | 1 |
Returns the feature with the lowest entropy given a target variable. | def get_lowest_entropy_feature(self, df=None):
if df is None:
df = self.df
target = self.target
entropies = []
for feature in self.features:
entropies.append(self.get_entropy_df(df=df) - self.get_entropy_feature(feature=feature, df=df))
# Quit growing i... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_entropy_feature(self, feature, df=None):\n if df is None:\n df = self.df\n target = self.target\n\n target_variables = df[target].unique()\n variables = df[feature].unique()\n entropy = 0\n\n # Aggregate entropy for each unique value in 'feature' feature... | [
"0.63987845",
"0.62906563",
"0.62745535",
"0.62361795",
"0.5970269",
"0.58792543",
"0.5835181",
"0.5813218",
"0.57950956",
"0.5747696",
"0.5695478",
"0.5677797",
"0.56756794",
"0.5600471",
"0.5550534",
"0.5540838",
"0.554071",
"0.5539472",
"0.55303925",
"0.5516388",
"0.549865... | 0.74907 | 0 |
Returns a recursively built tree using entropy for determining splits. | def build_tree(self, df=None, tree=None, depth=0):
if df is None:
df = self.df
target = self.target
node = self.get_lowest_entropy_feature(df)
if not node:
print("Pure solution not possible in current branch...")
return tree
variables = df[nod... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __gen_merkle_tree__(self):\n tree_stage = []\n tree_stage_num = int(log2(self.l))\n current_tree_stage = self.keys[1:]\n\n for i in range(0, tree_stage_num):\n tree_stage.insert(i, self.__gen_parent_level_tree__(current_tree_stage))\n current_tree_stage = tree_... | [
"0.70202446",
"0.6813201",
"0.66544646",
"0.66278887",
"0.651889",
"0.6421867",
"0.6334987",
"0.6331442",
"0.62777907",
"0.6258492",
"0.62338763",
"0.6215458",
"0.6194862",
"0.61937433",
"0.6149138",
"0.6122082",
"0.6111612",
"0.60880184",
"0.60734576",
"0.6070742",
"0.605333... | 0.6884574 | 1 |
Calculates the frequency distribution of universal POS tags | def pos_tag_counts(doc):
tags = []
for token in doc:
tags.append(token.pos_)
frequency = dict(Counter(tags).most_common())
return frequency | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pos_frequencies(corpus):\n return frequencies(corpus, -2)",
"def freq():",
"def get_num_POS_tags(data, pos_tag):\n pos_count = []\n for tweet in data:\n tokens = nltk.word_tokenize(tweet)\n tags = nltk.pos_tag(tokens)\n counts = Counter([j for i, j in tags])\n total = s... | [
"0.6999544",
"0.6887276",
"0.6551115",
"0.6530391",
"0.6523099",
"0.6506458",
"0.6481359",
"0.64570904",
"0.6456174",
"0.6447249",
"0.63904",
"0.63714606",
"0.6351611",
"0.62936366",
"0.62892985",
"0.6284025",
"0.62587374",
"0.6250596",
"0.62461895",
"0.62247854",
"0.6215164"... | 0.6933762 | 1 |
In this callback we check if the lidar sensor is online; the bottom clearance data is set to NaN if we are using barometric pressure | def altitude_callback(self, data):
self.altitude = data
self.altitude_bottom_clearance = np.float64(data.bottom_clearance)
if np.isnan(self.altitude_bottom_clearance):
# message is often enough that it should alert the user but shouldn't swamp the console
rospy.logwarn_th... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self):\n if self.temperature != None and self.humidity != None:\n self.sensor.set_environmental_data(self.humidity, self.temperature)\n# Trim away error values.\n new_eco2 = self.sensor.eco2\n if new_eco2 < 65535:\n self.eco2 = new_eco2\n self.tvoc... | [
"0.5760847",
"0.56696296",
"0.5600618",
"0.5469788",
"0.54373187",
"0.54115516",
"0.53911066",
"0.5358881",
"0.5356354",
"0.5347453",
"0.5337583",
"0.5324799",
"0.5323337",
"0.532301",
"0.52751553",
"0.5252866",
"0.52426714",
"0.5241194",
"0.521606",
"0.5205771",
"0.5201203",... | 0.6732945 | 0 |
Extended function to override the HTML parser and collect words | def handle_data(self, data):
# Extended from HTML Parser
# Overied this function for collecting word from webpage and store in to a list
words = data.split()
for word in words:
if word.isalpha() == True: # This is for filtering only alphabet string.
self... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse(self, word):\n raise NotImplementedError",
"def parser(self, value):\n value = self.lowercase(value)\n value = self.punctuation(value)\n value = self.tokenization(value)\n value = self.remove_stopwords(value)\n value.append(\"wiki\")\n searched_words = \... | [
"0.62091404",
"0.61985904",
"0.603075",
"0.6020231",
"0.59993106",
"0.5990653",
"0.5796557",
"0.5792185",
"0.57910925",
"0.5778996",
"0.57431465",
"0.57394904",
"0.5726501",
"0.56900585",
"0.56900585",
"0.5642334",
"0.5611355",
"0.56076735",
"0.5604131",
"0.5571104",
"0.55648... | 0.62500596 | 0 |
Sort hypotheses according to their log probability. | def sort_hyps(self, hyps):
return sorted(hyps, key=lambda h: h.avg_log_prob, reverse=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _logp(self, trace, **inputs):\n def calc_log(step):\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_prob = norm.logpdf(x=inputs['gwas_phen'],\n ... | [
"0.5838612",
"0.5797235",
"0.5788106",
"0.57859063",
"0.5765085",
"0.56167305",
"0.56062204",
"0.5595072",
"0.5560781",
"0.5496417",
"0.54868",
"0.5470694",
"0.5460611",
"0.54308903",
"0.542925",
"0.5401365",
"0.5391079",
"0.5382952",
"0.5363151",
"0.5355698",
"0.53490305",
... | 0.76663876 | 1 |
Generates an attribute filter function for the given attributes value. The attributes value can take one of several shapes. This returns a filter function appropriate to the attributes value. One nice thing about this is that there's less if/then shenanigans in the ``allow_token`` method. | def attribute_filter_factory(attributes):
if callable(attributes):
return attributes
if isinstance(attributes, dict):
def _attr_filter(tag, attr, value):
if tag in attributes:
attr_val = attributes[tag]
if callable(attr_val):
retur... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_attributes_choices(self): \n filter_attributes = [\n 'no filters',\n 'user_id',\n 'device_id',\n 'device_first_seen_ts',\n 'device_first_view_ts', \n 'platform',\n 'platform_type',\n ... | [
"0.5660973",
"0.5379473",
"0.534261",
"0.5250468",
"0.5212326",
"0.5194705",
"0.517152",
"0.51187414",
"0.50908965",
"0.5085648",
"0.49764767",
"0.48972845",
"0.48662314",
"0.4835603",
"0.48243764",
"0.48073313",
"0.4796485",
"0.47842774",
"0.47827226",
"0.4765363",
"0.476034... | 0.7323271 | 0 |
Creates a BleachSanitizerFilter instance | def __init__(self, source, attributes=ALLOWED_ATTRIBUTES,
strip_disallowed_elements=False, strip_html_comments=True,
**kwargs):
self.attr_filter = attribute_filter_factory(attributes)
self.strip_disallowed_elements = strip_disallowed_elements
self.strip_html_co... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean(self, text):\n if not isinstance(text, six.string_types):\n raise TypeError('argument must of text type')\n\n if not text:\n return u''\n\n text = force_unicode(text)\n\n dom = self.parser.parseFragment(text)\n filtered = BleachSanitizerFilter(\n ... | [
"0.5844354",
"0.5506664",
"0.5395552",
"0.53924406",
"0.53181165",
"0.52993613",
"0.528205",
"0.52735686",
"0.525211",
"0.52488923",
"0.5222469",
"0.5220284",
"0.5183616",
"0.51724327",
"0.5108",
"0.5105979",
"0.5087034",
"0.5083709",
"0.5063567",
"0.50324994",
"0.5003623",
... | 0.716723 | 0 |
Sanitize a token either by HTML-encoding it or dropping it. | def sanitize_token(self, token):
token_type = token['type']
if token_type in ['StartTag', 'EndTag', 'EmptyTag']:
if token['name'] in self.allowed_elements:
return self.allow_token(token)
elif self.strip_disallowed_elements:
return None
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_token(token):\n cleaned_token = token\n if cleaned_token in NORMALIZE_DICT:\n cleaned_token = NORMALIZE_DICT[cleaned_token]\n\n if cleaned_token not in REMOVED_CHAR:\n for char in REMOVED_CHAR:\n cleaned_token = cleaned_token.replace(char, u'')\n\n if len(cleaned_toke... | [
"0.6213719",
"0.61599076",
"0.6135648",
"0.61082906",
"0.6071557",
"0.6028807",
"0.60134274",
"0.6010262",
"0.6005891",
"0.5977613",
"0.5893458",
"0.58391213",
"0.5737908",
"0.57283384",
"0.5718727",
"0.5710873",
"0.56932616",
"0.5682348",
"0.56683916",
"0.5661966",
"0.564721... | 0.72170484 | 0 |
Handles Characters tokens. Our overridden tokenizer doesn't do anything with entities. However, that means that the serializer will convert all ``&`` in Characters tokens to ``&amp;``. Since we don't want that, we extract entities here and convert them to Entity tokens so the serializer will let them be. | def sanitize_characters(self, token):
data = token.get('data', '')
if not data:
return token
data = INVISIBLE_CHARACTERS_RE.sub(INVISIBLE_REPLACEMENT_CHAR, data)
token['data'] = data
# If there isn't a & in the data, we can return now
if '&' not in data:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def special_tokens(self, ):\n\n if self.tokenizer.bos_token is None or self.tokenizer.eos_token is None:\n special_tokens = self.tokenizer.build_inputs_with_special_tokens([])\n special_tokens_ids = self.tokenizer.convert_ids_to_tokens(special_tokens)\n self.tokenizer.bo... | [
"0.57361805",
"0.5715559",
"0.5693421",
"0.5683378",
"0.5586623",
"0.5558785",
"0.55293304",
"0.5496346",
"0.54673356",
"0.5376477",
"0.5312388",
"0.5287163",
"0.52551013",
"0.5237161",
"0.52218133",
"0.5212501",
"0.5194779",
"0.515802",
"0.5128146",
"0.5125173",
"0.5115463",... | 0.7078704 | 0 |
Sanitizes css in style tags | def sanitize_css(self, style):
# disallow urls
style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
# gauntlet
# Validate the css in the style tag and if it's not valid, then drop
# the whole thing.
parts = style.split(';')
gauntlet = re.compile(
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def condense_style(html): # May look silly but Emmet does this and is wrong.\n log.debug(\"Condensing HTML Style CSS tags.\")\n return html.replace('<style type=\"text/css\">', '<style>').replace(\n \"<style type='text/css'>\", '<style>').replace(\n \"<style type=text/css>\", '<style>')",
... | [
"0.75775725",
"0.7063994",
"0.6838947",
"0.67956835",
"0.6540166",
"0.65271175",
"0.61195105",
"0.6116861",
"0.60196775",
"0.6014334",
"0.5951415",
"0.5801298",
"0.57960355",
"0.579041",
"0.5779055",
"0.57478005",
"0.5692764",
"0.56792575",
"0.56675595",
"0.5645531",
"0.56421... | 0.7883569 | 0 |
something like python-foo is python-python-foo as spec requirement | def test_starting_with_python(self):
self.assertEqual({"python-python-xyz": None},
pr.sanitize_requirements(["python-xyz"])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_lowest_version(self):\n self.assertEqual({\"python-xyz\": \"1\",\n \"python-foo\": \"3.1\"},\n pr.sanitize_requirements(\n [\"xyz>=1,>=2\", \"foo>=4,>=3.1\"]))",
"def test_2x_only_python_version_deploy():\n pass",
"... | [
"0.685132",
"0.65986437",
"0.65244454",
"0.6404376",
"0.63750374",
"0.63698786",
"0.6258343",
"0.6173323",
"0.61222714",
"0.60513926",
"0.6050884",
"0.6039168",
"0.59027624",
"0.58682185",
"0.58572984",
"0.580315",
"0.58021176",
"0.5800806",
"0.57969373",
"0.5785427",
"0.5772... | 0.769921 | 0 |
allow markers in requirement lines; multiple versions specified | def test_with_markers_and_lowest_version(self):
self.assertEqual(
{"python-futures": "3.0"},
pr.sanitize_requirements(
["futures>=3.0,<=4.1,!=4.0;python_version=='2.7'"
"or python_version=='2.6'"])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_lowest_version(self):\n self.assertEqual({\"python-xyz\": \"1\",\n \"python-foo\": \"3.1\"},\n pr.sanitize_requirements(\n [\"xyz>=1,>=2\", \"foo>=4,>=3.1\"]))",
"def _update_properties_file(self, lines, filename):\n ... | [
"0.68128824",
"0.6486892",
"0.64829147",
"0.6400448",
"0.6394141",
"0.6372991",
"0.63658035",
"0.6328819",
"0.63199604",
"0.63058853",
"0.6260977",
"0.62569046",
"0.622087",
"0.6197919",
"0.6197732",
"0.6161421",
"0.61268747",
"0.6084504",
"0.60470927",
"0.6019284",
"0.601563... | 0.69502366 | 0 |
Ignore requirements with win32 marker | def test_skip_windows_requires(self):
self.assertEqual(
{"python-true": "1"},
pr.sanitize_requirements(
["true>=1",
"wmi;sys_platform=='win32'"])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_skip_unless_windows(self):\n pass",
"def test_skip_if_windows(self):\n pass",
"def missing_gdk(finder, caller):\n caller.IgnoreName(\"gdk\")",
"def skip_on_windows (func):\n import sys\n\n return skip_if(sys.platform.startswith('win'))(func)",
"def missing_EasyDialogs(finder... | [
"0.6767136",
"0.67545867",
"0.6513805",
"0.635709",
"0.6211686",
"0.60411996",
"0.5990416",
"0.5737507",
"0.56727284",
"0.56727284",
"0.5605445",
"0.5456626",
"0.54284465",
"0.5417463",
"0.539966",
"0.5398191",
"0.5334668",
"0.53026396",
"0.5255139",
"0.5179256",
"0.514295",
... | 0.72444594 | 0 |
Ignore requirements with python3 marker | def test_skip_python3_requires(self):
self.assertEqual(
{"python-ovs": "2.5.0"},
pr.sanitize_requirements(
["ovs>=2.5.0;python_version=='2.7' # Apache-2.0",
"ovs>=2.6.0.dev1;python_version>='3.4' # Apache-2.0"])
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_with_markers_and_lowest_version(self):\n self.assertEqual(\n {\"python-futures\": \"3.0\"},\n pr.sanitize_requirements(\n [\"futures>=3.0,<=4.1,!=4.0;python_version=='2.7'\"\n \"or python_version=='2.6'\"]))",
"def test_starting_with_python(sel... | [
"0.7039466",
"0.7032259",
"0.7014665",
"0.67280644",
"0.64697397",
"0.6431665",
"0.64238596",
"0.6393986",
"0.63385063",
"0.6160169",
"0.6138403",
"0.60360676",
"0.6019412",
"0.58904576",
"0.5868022",
"0.5848948",
"0.582707",
"0.5823058",
"0.582071",
"0.58111125",
"0.57750684... | 0.78808457 | 0 |
Save a mask to filename. | def save_mask2file(mask, filename, crs=None, transform=None):
height, width = mask.shape
if mask.dtype == np.bool:
mask = mask.astype(np.uint8)
with rasterio.open(
filename,
'w',
driver='GTiff',
dtype=mask.dtype,
count=1,
width=... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_mask(self, path):\n if self.im is None:\n raise ValueError('You didnt call the MaskOverlay, therefore there is no image in memory')\n np.save(path, self.mask)",
"def save_overlay(self, path):\n if self.im is None:\n raise ValueError('You didnt call the MaskOver... | [
"0.74468195",
"0.681574",
"0.67942196",
"0.64891225",
"0.64773023",
"0.62801504",
"0.62455696",
"0.62434334",
"0.6102407",
"0.6039554",
"0.60331523",
"0.5974805",
"0.5962826",
"0.5906069",
"0.5905",
"0.58839047",
"0.58183604",
"0.5792057",
"0.5689327",
"0.5682265",
"0.5618511... | 0.72517014 | 1 |
Load a binary mask from filename into a numpy array. Returns the mask image loaded as a numpy array. | def load_mask_from_file(filename):
mask = imread(filename)
return mask | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_signal_mask(self, path):\n mask = np.load(path)",
"def load_mask(filename):\n nib_image = nib.load(filename)\n mask_affine = nib_image.affine\n\n return preprocess_nib(nib_image, is_mask=True), mask_affine",
"def load_mask(self):\n mask_file = fetch_one_file(self.ica_dir, self._... | [
"0.8149826",
"0.75025517",
"0.7336277",
"0.72396976",
"0.69996434",
"0.69910765",
"0.69326127",
"0.684793",
"0.6726304",
"0.671339",
"0.6681663",
"0.66645074",
"0.6654374",
"0.66409326",
"0.6637058",
"0.66202277",
"0.66072327",
"0.65920156",
"0.65165544",
"0.6504364",
"0.6496... | 0.78713435 | 1 |
Load a mask from a shapefile. | def load_mask_from_shapefile(filename, shape, transform):
multipolygon, _ = load_shapefile2multipolygon(filename)
mask = multipolygon2mask(multipolygon, shape, transform)
return mask | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_mask_from_file(filename):\n mask = imread(filename)\n\n return mask",
"def load_signal_mask(self, path):\n mask = np.load(path)",
"def load_mask(self):\n mask_file = fetch_one_file(self.ica_dir, self._mask_fname, pat_type='re.match')\n return niimg.load_img(mask_file)",
"d... | [
"0.7431536",
"0.7378019",
"0.71810657",
"0.71287495",
"0.7058122",
"0.70275694",
"0.682493",
"0.66390985",
"0.6612292",
"0.6592753",
"0.6572625",
"0.65397227",
"0.6522591",
"0.6463834",
"0.64407456",
"0.64399445",
"0.64317584",
"0.64289963",
"0.63150597",
"0.6312492",
"0.6294... | 0.8144621 | 0 |
Compute a mask based on an NDXI feature. | def get_ndxi_mask(generator, feature=NirNDVI):
windows = (generator.step_size, )
values = next(extract_features([feature(windows)], generator)).vector
values.shape = (values.shape[0], values.shape[1])
mask = np.array(values.mask)
unmasked_values = np.array(values[~values.mask])
mask[~mask] = unm... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_contest_mask():\n return createmaskdf(\"data/fcstrodeo_nctemplates/fcstrodeo_mask.nc\")",
"def _get_mask(self, anno, idx):\n coco = self.coco\n img_info = coco.loadImgs(self.img_ids[idx])[0]\n\n m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)\n\n for... | [
"0.64785165",
"0.6324115",
"0.6131358",
"0.6096008",
"0.6096008",
"0.60605854",
"0.6017692",
"0.5902445",
"0.58749795",
"0.586848",
"0.5867567",
"0.5862303",
"0.584124",
"0.5813988",
"0.5810991",
"0.58052564",
"0.5790705",
"0.57692015",
"0.5762006",
"0.5745222",
"0.57375586",... | 0.7317054 | 0 |
Run computation on node performing `op_fun`. `op_fun` has to accept a node as an argument. | def run_op_node(input_data, op_fun, *args):
runtime = get_runtime()
comp_args = []
op_fun_args = []
comp_inputs = []
for idx, data in enumerate(input_data):
if np.isscalar(data):
op_fun_args.append(ng.constant(data, _get_numpy_dtype(data)))
else:
node = ng.par... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_op_numeric_data(input_data, op_fun, *args):\n runtime = get_runtime()\n node = op_fun(input_data, *args)\n computation = runtime.computation(node)\n return computation()",
"def __call__(self, tf_node, input_ops):\n op_name = tf_node.op\n\n # if op not handled, gets -1\n n... | [
"0.68987066",
"0.63727605",
"0.61560744",
"0.60866606",
"0.59173733",
"0.589501",
"0.58199143",
"0.56726414",
"0.5670642",
"0.5646401",
"0.5547242",
"0.5536755",
"0.5459274",
"0.5457556",
"0.54173595",
"0.53511155",
"0.534198",
"0.5328317",
"0.53263",
"0.52942103",
"0.5292017... | 0.7836884 | 0 |
Run computation on node performing `op_fun`. `op_fun` has to accept a scalar or an array. | def run_op_numeric_data(input_data, op_fun, *args):
runtime = get_runtime()
node = op_fun(input_data, *args)
computation = runtime.computation(node)
return computation() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_op_node(input_data, op_fun, *args):\n runtime = get_runtime()\n comp_args = []\n op_fun_args = []\n comp_inputs = []\n for idx, data in enumerate(input_data):\n if np.isscalar(data):\n op_fun_args.append(ng.constant(data, _get_numpy_dtype(data)))\n else:\n ... | [
"0.8162125",
"0.6447787",
"0.6189435",
"0.596804",
"0.5792386",
"0.5773607",
"0.5733637",
"0.56997246",
"0.5688552",
"0.5668171",
"0.5667309",
"0.55916333",
"0.55768436",
"0.55464166",
"0.5527756",
"0.54990244",
"0.54635894",
"0.5456901",
"0.5447017",
"0.5434432",
"0.54329336... | 0.7614549 | 1 |
Return the latest version of the given schema | def get_latest_version(self, name):
return self.filter(name=name).order_by('schema_version').last() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_latest_schemaorg_version():\n tag_name = requests.get(SCHEMAORG_VERSION_URL).json()[\"tag_name\"] # \"v13.0-release\"\n mat = re.match(r\"v([\\d.]+)-release\", tag_name)\n if not mat:\n raise ValueError(f\"Unrecognized release tag name {tag_name}\")\n latest = mat.group(1)\n return l... | [
"0.7790253",
"0.7104092",
"0.7003174",
"0.6998072",
"0.6989781",
"0.6966129",
"0.68279827",
"0.67498225",
"0.6675044",
"0.64771247",
"0.64744395",
"0.6458408",
"0.6391964",
"0.63075215",
"0.629649",
"0.6258136",
"0.62317574",
"0.61514646",
"0.6149739",
"0.6116614",
"0.6109423... | 0.75875497 | 1 |
Validates `registration_responses` against this schema (using `schema_blocks`). Raises `ValidationError` if invalid. Otherwise, returns True. | def validate_registration_responses(self, registration_responses, required_fields=False):
validator = RegistrationResponsesValidator(self.schema_blocks.all(), required_fields)
return validator.validate(registration_responses) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate(self, registration):\n self.l.info(\"Starting registration validation\")\n\n validation_errors = []\n\n # Check if registrant_id is a valid UUID\n if not utils.is_valid_uuid(registration.registrant_id):\n validation_errors += [\"Invalid UUID registrant_id\"]\n\n ... | [
"0.57850087",
"0.56401217",
"0.5630611",
"0.5599469",
"0.55679965",
"0.55353653",
"0.53955424",
"0.5328068",
"0.530389",
"0.5194013",
"0.5185546",
"0.5152043",
"0.5151474",
"0.5126505",
"0.5081167",
"0.5079145",
"0.5051262",
"0.5037521",
"0.50278944",
"0.50093704",
"0.4979922... | 0.8683601 | 0 |
Allows us to use a unique_together constraint, so each "registration_response_key" only appears once for every registration schema. To do this, we need to save empty "registration_response_key"s as null, instead of an empty string. | def save(self, *args, **kwargs):
self.registration_response_key = self.registration_response_key or None
return super().save(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def record_unique_together_applied(model_sig):\n model_sig['meta']['__unique_together_applied'] = True",
"def unique_together(self):\n if self._meta.unique_together:\n return self._meta.unique_together[0]\n return ()",
"def apply_unique_together(self, unique_together):\n self... | [
"0.57599163",
"0.56867594",
"0.5517179",
"0.53901464",
"0.5270959",
"0.5225833",
"0.517888",
"0.5130246",
"0.5084795",
"0.5074656",
"0.5059439",
"0.49477896",
"0.48854136",
"0.47908023",
"0.47602364",
"0.46850708",
"0.46745723",
"0.46619073",
"0.46608734",
"0.46590465",
"0.46... | 0.64849234 | 0 |
Return True if frame is mergable with this instance. | def mergable(self, frame):
for pos in self.srcList:
if pos in frame.srcList:
return True
for pos in self.tgtList:
if pos in frame.tgtList:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merged(self) -> bool:\n return pulumi.get(self, \"merged\")",
"def is_merged(self):\n return self.get_data(\"state\") == self.STATE_MERGED",
"def can_overlap(self):\n return self.is_open",
"def can_overlap(self):\n return self.is_open",
"def can_overlap(self):\n retur... | [
"0.6856943",
"0.66920245",
"0.612307",
"0.612307",
"0.60129476",
"0.5976742",
"0.5958957",
"0.5924997",
"0.58609396",
"0.58477247",
"0.57797647",
"0.5731097",
"0.5731097",
"0.5692386",
"0.56780577",
"0.5652896",
"0.5599992",
"0.55998355",
"0.55879635",
"0.55879635",
"0.556510... | 0.77469105 | 0 |
Merge two Frame instances frame1 and frame2, and return the merged Frame instance. frame1 and frame2 must be on the same tree pair. | def merge(cls, frame1, frame2):
return cls(list(set(frame1.srcList+frame2.srcList)), list(set(frame1.tgtList+frame2.tgtList)), frame1.srcTree, frame1.tgtTree) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge(t1, t2):\n if t2 is None:\n return t1\n if t1 is None:\n return t2\n\n t1 = _splay(_find_max(t1))\n t1.right = t2\n t2.parent = t1\n return t1",
"def blend_frames(self, frame0, frame1, blend):\n root_pos0 = self.get_frame_root_pos(frame0)\n root_pos1 = self.get_fra... | [
"0.62682045",
"0.59971756",
"0.5608523",
"0.5507865",
"0.5435409",
"0.5363136",
"0.5361167",
"0.5360327",
"0.53100735",
"0.5293331",
"0.52818555",
"0.5263779",
"0.5189165",
"0.51624644",
"0.5137856",
"0.5104978",
"0.5097869",
"0.5088363",
"0.5084046",
"0.50600046",
"0.4945050... | 0.8000494 | 0 |
Return the offset of the first word and the last word for every subtree in the list. | def treeposition2offsetPosition(subTrPosList, tr):
offsetList = []
cnt = 0
for pos in subTrPosList:
par = tr[pos]
while par != tr:
for i in xrange(par.parent_index()):
if isinstance(par.parent()[i], nltk.ParentedTree):
cnt += len(par.parent()[i].leaves())
else:
print >> debug_log, ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _gen_loc_words(word_list: list):\r\n loc = 0\r\n res = []\r\n for word in word_list:\r\n res.append((loc, word))\r\n loc += len(word)\r\n return res",
"def get_words_position(self, words: List[Word]) -> Tuple[int, int]:\n start: int = self.get_word_pos... | [
"0.61927235",
"0.61151785",
"0.60680246",
"0.59052956",
"0.5831214",
"0.58253384",
"0.57263756",
"0.5528103",
"0.5399332",
"0.533705",
"0.5324308",
"0.53128594",
"0.5294802",
"0.52609503",
"0.5232178",
"0.5219642",
"0.52062565",
"0.5159778",
"0.51504004",
"0.51499826",
"0.514... | 0.61881554 | 1 |
Use subtreeAlignFunc to pick one subtree alignment from this frame. | def subtreeAlign(self, subtreeAlignFunc, srcTr, tgtTr):
self.subtreeAlignment_treepos = subtreeAlignFunc(self, srcTr, tgtTr)
self.subtreeAlignment_waMatrixPos = [self.treeposition2waMatrixPosition(suba[0], suba[1], srcTr, tgtTr) \
for suba in self.subtreeAlignment_treepos] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def subtreeAlign(self, subtreeAlignFunc):\n\t\tfor frame in self.frameList:\n\t\t\tframe.subtreeAlign(subtreeAlignFunc, self.srcTree, self.tgtTree)",
"def rotate_subtree_left(subtree):\n right = subtree.right\n subtree.right = right.left\n right.left = subtree\n right.colour = subtree.colour\n sub... | [
"0.7943352",
"0.49963066",
"0.48671794",
"0.48312488",
"0.48312488",
"0.46881577",
"0.4671619",
"0.46248424",
"0.46107876",
"0.46053573",
"0.46003407",
"0.45801792",
"0.4549547",
"0.4548548",
"0.45392197",
"0.4525178",
"0.44993445",
"0.4491107",
"0.44905266",
"0.44830802",
"0... | 0.77718335 | 1 |
Return a dictionary; keys are 2-tuples, where each tuple (i, j) represents a subtree span covering from word i to word j-1, and values are the corresponding subtree treepositions. Here we only keep subtree spans, not leaf spans, i.e. one-layer subtrees like (NNP China) are ignored. [experiment] include one-layer subtrees like (... | def _extractSubtreeSpan_(self, tree, wordRulesFlag):
spanDict = {}
snt = tree.leaves()
#pdb.set_trace()
for subtree in tree.subtrees():
if not wordRulesFlag and subtree.height() <= 2:
continue
span = Frame.treeposition2offsetPosition([subtree.treeposition()], tree)[0]
spanDict[(span[0], span[1])] ... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_grouped_items(self):\n word_map = dict()\n it = self.root\n \n # Get all immediate children of root.\n for prefix, node in self.root.children.items():\n prefix_key = ''\n it = node\n # Continue with each child of root node till it has one ... | [
"0.591011",
"0.5883264",
"0.5746557",
"0.5689263",
"0.5616009",
"0.54649633",
"0.5443807",
"0.5409442",
"0.53955895",
"0.53509617",
"0.53248715",
"0.5319948",
"0.53106767",
"0.52917504",
"0.52781814",
"0.52756643",
"0.52704436",
"0.5250692",
"0.52246916",
"0.52226436",
"0.521... | 0.75793076 | 0 |
Return True if "span" is consistent with the word alignment on language "lan", else return False. | def _consistentWithWA_(self, span, lan):
if lan == 'src':
wordAlign = self.waMatrix
else:
wordAlign = [[self.waMatrix[i][j] for i in xrange(len(self.waMatrix))] for j in xrange(len(self.waMatrix[0]))]
pos1 = [j for i in xrange(span[0], span[1]) for j in xrange(len(wordAlign[i])) if wordAlign[i][j] == 1]
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find(self) -> bool:\n alignments = []\n for sw_idx in range(len(self.sw)):\n for nu_idx in range(len(self.nu)):\n alignments.append(Alignment(self.nu, self.sw, nu_idx, sw_idx, self.orig_nu))\n alignment = max(alignments, key=lambda align: align.score)\n if ... | [
"0.63992625",
"0.6315273",
"0.60882205",
"0.58788294",
"0.58561766",
"0.56875026",
"0.5668905",
"0.56449294",
"0.56140864",
"0.5543863",
"0.5537767",
"0.5527828",
"0.54822993",
"0.54472035",
"0.5431362",
"0.54153144",
"0.5383697",
"0.5379699",
"0.53690237",
"0.5358199",
"0.53... | 0.7869109 | 0 |
Return a 2-tuple, a span in the other language, representing the span corresponding to "span" in "lan". | def _scanSpan_(self, span, lan):
#pdb.set_trace()
if lan == 'src':
wordAlign = self.waMatrix
else:
wordAlign = [[self.waMatrix[i][j] for i in xrange(len(self.waMatrix))] for j in xrange(len(self.waMatrix[0]))]
otherSpan = [MAX, MIN]
for i in xrange(span[0], span[1]):
for j in xrange(len(wordAlig... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __getspaninfo(lnode, rnode):\n try:\n eduspan = (lnode.eduspan[0], rnode.eduspan[1])\n except TypeError:\n print lnode.prop, rnode.prop\n print lnode.nucspan, rnode.nucspan\n return eduspan",
"def spanToTuple(self, span):\n\t\ttry:\n\t\t\tspanBegin, spanEnd = span.split(',')\n\t... | [
"0.5928464",
"0.57783765",
"0.55528563",
"0.54318804",
"0.5424503",
"0.53724504",
"0.53511155",
"0.53242594",
"0.53223926",
"0.5250803",
"0.5224226",
"0.5191113",
"0.51670516",
"0.51624835",
"0.5142095",
"0.51099247",
"0.5045781",
"0.5033951",
"0.49898773",
"0.4979068",
"0.49... | 0.6735803 | 0 |
For each frame in this sentence, pick one subtree alignment from it. | def subtreeAlign(self, subtreeAlignFunc):
for frame in self.frameList:
frame.subtreeAlign(subtreeAlignFunc, self.srcTree, self.tgtTree) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_alignment_from(tree):\r\n msa = []\r\n for node in tree.get_terminals():\r\n alignment = self.msa_by_name[node.name.split(' ')[0]]\r\n if msa:\r\n msa.append(alignment)\r\n else:\r\n msa = MultipleSeqAl... | [
"0.61281717",
"0.5682574",
"0.5621451",
"0.5610291",
"0.5484573",
"0.5340691",
"0.53166825",
"0.52982336",
"0.52693313",
"0.52550215",
"0.5244625",
"0.51851237",
"0.51479876",
"0.5139947",
"0.5126392",
"0.50898707",
"0.50732404",
"0.5068625",
"0.50669676",
"0.50468457",
"0.50... | 0.623596 | 0 |
Calculates the length of each sequence inside a list, returning (element, length) pairs | def element_length(lst: Iterable[Sequence]) -> List[Tuple[Sequence, int]]:
return [(i, len(i)) for i in lst] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lsize( lst ):\n return sum( [ x[1] for x in lst ] )",
"def __len__(self):\n n = 1\n for valTuple in self._valListOfLists:\n n *= len(valTuple)\n return n",
"def get_list_length(self):\n n = 0\n l = self\n while l.is_block():\n n+=1\n l = l.field(1)\n ... | [
"0.7522205",
"0.7252191",
"0.7017774",
"0.69745976",
"0.69745976",
"0.6790512",
"0.6776123",
"0.67288065",
"0.6696",
"0.66074085",
"0.6599637",
"0.6568948",
"0.65648407",
"0.6536912",
"0.64823395",
"0.6475998",
"0.6471026",
"0.6456544",
"0.6444813",
"0.6433039",
"0.64150035",... | 0.797556 | 0 |
Returns JSON data for the requested Digg endpoint | def get_json(endpoint):
url = ''.join([
'http://services.digg.com',
endpoint,
'?appkey=%s' % APPKEY,
'&type;=json',
])
return urllib2.urlopen(url).read() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connect_data_api(self, endpoint):\n\n url = 'https://api.gdax.com' + endpoint\n res = requests.get(url)\n\n if res.status_code == 200:\n return res.json()\n else:\n raise ValueError(res.content)",
"def get_json(self):\n url = 'http://lkd.to/api/' + sel... | [
"0.63296825",
"0.632292",
"0.6256466",
"0.61851496",
"0.6035162",
"0.6035162",
"0.6023629",
"0.6001173",
"0.5972762",
"0.59639215",
"0.59509265",
"0.5947116",
"0.58656496",
"0.58101904",
"0.57769",
"0.5770559",
"0.5740533",
"0.5736846",
"0.5727516",
"0.5722231",
"0.5714382",
... | 0.7030758 | 0 |
Exit when the Python version is too low. | def check_python_version():
if sys.version_info < MINIMUM_PYTHON_VERSION:
sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_python_version():\n version = sys.version.split()[0]\n if version < \"2.6\" or version >= \"3\":\n raise CuckooStartupError(\"You are running an incompatible version of Python, please use 2.6 or 2.7\")",
"def _check_python_version(min_version):\n if sys.version_info < min_version:\n ... | [
"0.8062994",
"0.7595006",
"0.74613327",
"0.73725754",
"0.7319089",
"0.7166508",
"0.7116363",
"0.7087189",
"0.69772106",
"0.69696325",
"0.68991804",
"0.682904",
"0.6719451",
"0.6688046",
"0.66628706",
"0.665088",
"0.6538358",
"0.64709836",
"0.6450609",
"0.6290435",
"0.6257957"... | 0.81366074 | 1 |
Packet waiting & service loop | def run(self):
waiting_packet = None
while True:
if waiting_packet is not None:
packet = waiting_packet
waiting_packet = None
else:
packet = yield self.buffer.get()
self.channel.add_sender(self)
yiel... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def open_service_loop(self):\n\t\n\tprint \"Attempting to receive file\", self.file_read, \"from\", self.ip, \"at port\", self.port, \".\" \n\trecv_data = None\n\tnum_retransmits = 0\n\t#Start timer, retransmit after each timeout of one second. If receive response within the timer, move on to next step. \n\t#Limit... | [
"0.6621941",
"0.6615155",
"0.65084773",
"0.64677125",
"0.6462943",
"0.6385985",
"0.63236433",
"0.632002",
"0.63187605",
"0.6293183",
"0.6292364",
"0.6276911",
"0.62755835",
"0.6266912",
"0.6254571",
"0.62525535",
"0.6221155",
"0.6208616",
"0.6160476",
"0.61516273",
"0.6144330... | 0.6794869 | 0 |
Build arguments for the Rally task. | def build_task_args(self, test_name):
task_args = {'service_list': [test_name]}
task_args['image_name'] = str(self.image.name)
task_args['flavor_name'] = str(self.flavor.name)
task_args['flavor_alt_name'] = str(self.flavor_alt.name)
task_args['glance_image_location'] = str(self.f... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_task_args(self, test_name):\n task_args = {}\n if self.ext_net:\n task_args['floating_network'] = str(self.ext_net.name)\n else:\n task_args['floating_network'] = ''\n task_args['image_name'] = str(self.image.name)\n task_args['flavor_name'] = str(... | [
"0.6594043",
"0.65554553",
"0.61109805",
"0.6043353",
"0.59985673",
"0.59900707",
"0.5975896",
"0.59682924",
"0.59195334",
"0.5870925",
"0.5840858",
"0.5762598",
"0.5727708",
"0.5724328",
"0.5706142",
"0.56642747",
"0.5651907",
"0.5580692",
"0.55498743",
"0.5525899",
"0.54782... | 0.69316447 | 0 |
Returns deployment id for active Rally deployment | def get_verifier_deployment_id():
cmd = ("rally deployment list | awk '/" +
getattr(config.CONF, 'rally_deployment_name') +
"/ {print $2}'")
with subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT) as proc:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deployment_id(self) -> str:\n return pulumi.get(self, \"deployment_id\")",
"def get_verifier_deployment_id():\n cmd = (\"rally deployment list | awk '/\" +\n getattr(config.CONF, 'rally_deployment_name') +\n \"/ {print $2}'\")\n proc = subprocess.Popen(cmd, shell=True,\n ... | [
"0.8321824",
"0.8090205",
"0.7915562",
"0.7642716",
"0.7500653",
"0.6717956",
"0.6637013",
"0.6469506",
"0.6452627",
"0.6147499",
"0.6131181",
"0.60924846",
"0.60842574",
"0.5925855",
"0.5919809",
"0.5907035",
"0.57978237",
"0.579003",
"0.5778056",
"0.5776697",
"0.5770048",
... | 0.81415844 | 1 |
Create new rally deployment | def create_rally_deployment(environ=None):
# pylint: disable=unexpected-keyword-arg
# set the architecture to default
pod_arch = env.get("POD_ARCH")
arch_filter = ['aarch64']
if pod_arch and pod_arch in arch_filter:
LOGGER.info("Apply aarch64 specific to rally config... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_rally_deployment(environ=None):\n # set the architecture to default\n pod_arch = env.get(\"POD_ARCH\")\n arch_filter = ['aarch64']\n\n if pod_arch and pod_arch in arch_filter:\n LOGGER.info(\"Apply aarch64 specific to rally config...\")\n with open(RALLY_AARCH64_PATCH_PATH, \"r... | [
"0.79588085",
"0.69769126",
"0.6959189",
"0.68106496",
"0.68078554",
"0.63774866",
"0.6288516",
"0.62842375",
"0.62308085",
"0.6210832",
"0.61876476",
"0.6130477",
"0.609062",
"0.6017506",
"0.6001469",
"0.60007924",
"0.5995541",
"0.59840333",
"0.59733856",
"0.59307474",
"0.58... | 0.78494895 | 1 |
Set keystone_default_role in rally.conf | def update_keystone_default_role(rally_conf='/etc/rally/rally.conf'):
if env.get("NEW_USER_ROLE").lower() != "member":
rconfig = configparser.RawConfigParser()
rconfig.read(rally_conf)
if not rconfig.has_section('openstack'):
rconfig.add_section('openstack')
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_default_role(self, rally_conf='/etc/rally/rally.conf'):\n role = self.get_default_role(self.cloud)\n if not role:\n return\n rconfig = configparser.RawConfigParser()\n rconfig.read(rally_conf)\n if not rconfig.has_section('openstack'):\n rconfig.a... | [
"0.7335462",
"0.602011",
"0.59378004",
"0.5808055",
"0.57735175",
"0.5590387",
"0.55819136",
"0.5538477",
"0.5494703",
"0.549238",
"0.5442465",
"0.54219705",
"0.5356283",
"0.53251183",
"0.5314031",
"0.52759516",
"0.5272258",
"0.5216909",
"0.5208145",
"0.51750976",
"0.51750976... | 0.8199875 | 0 |
Get the task id from the rally command result. | def get_task_id(tag):
cmd = ["rally", "task", "list", "--tag", tag, "--uuids-only"]
output = subprocess.check_output(cmd).decode("utf-8").rstrip()
LOGGER.info("%s: %s", " ".join(cmd), output)
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def get_task_result(task_id: TaskId):",
"def task_id(self) -> str:\n return self.get_from_redis(\"task_id\")",
"def get_run_id_from_result(model_result):\n if 'ml_flow' not in model_result:\n return None\n\n with model_result['ml_flow'].open('r') as f:\n return ... | [
"0.70485806",
"0.6791863",
"0.6777043",
"0.6657056",
"0.6641306",
"0.65473753",
"0.6410299",
"0.6408232",
"0.6408232",
"0.6408232",
"0.6408232",
"0.63412124",
"0.62609065",
"0.62482697",
"0.6221775",
"0.6181935",
"0.610406",
"0.6102988",
"0.60917234",
"0.60808253",
"0.6067667... | 0.7335865 | 0 |
Determine if migration is supported. | def _migration_supported(self):
if self.compute_cnt > 1:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_migration(self, migration: str) -> bool:\n pass",
"def is_migrated_before():\n\n global migration_sign\n if os.path.exists(migration_sign):\n return True\n else:\n return False",
"def is_migrated(self) -> bool:\n return pulumi.get(self, \"is_migrated\")",
"def n... | [
"0.7405025",
"0.643145",
"0.64205694",
"0.6287498",
"0.62607265",
"0.6124708",
"0.603821",
"0.5938896",
"0.58489233",
"0.58389837",
"0.583489",
"0.58281356",
"0.5824205",
"0.57566595",
"0.5745185",
"0.5720472",
"0.5694382",
"0.5677317",
"0.5673947",
"0.5661287",
"0.56218493",... | 0.81642824 | 0 |
Check if given needle is in the iterable haystack, using regex. | def in_iterable_re(needle, haystack):
# match without regex
if needle in haystack:
return True
for pattern in haystack:
# match if regex pattern is set and found in the needle
if pattern and re.search(pattern, needle) is not None:
return True
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lines_contain(haystack: Iterable[str], needle: str) -> Iterator[int]:\n pat = re.compile(rf\"^\\W*{re.escape(needle)}\\W*$\")\n yield from (idx for idx, line in enumerate(haystack) if pat.match(line))",
"def _match_regex_list(subject, expressions):\n for expr in expressions:\n if re.search(ex... | [
"0.6985944",
"0.6416562",
"0.6401106",
"0.63869286",
"0.6322223",
"0.631153",
"0.6275128",
"0.6272992",
"0.6135999",
"0.6135562",
"0.6092985",
"0.6087945",
"0.59895885",
"0.5989014",
"0.5948714",
"0.5944926",
"0.591324",
"0.5881836",
"0.5838605",
"0.58077693",
"0.5800304",
... | 0.85009944 | 0 |
Build arguments for the Rally task. | def build_task_args(self, test_name):
task_args = {}
if self.ext_net:
task_args['floating_network'] = str(self.ext_net.name)
else:
task_args['floating_network'] = ''
task_args['image_name'] = str(self.image.name)
task_args['flavor_name'] = str(self.flavor.... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_task_args(self, test_name):\n task_args = {'service_list': [test_name]}\n task_args['image_name'] = str(self.image.name)\n task_args['flavor_name'] = str(self.flavor.name)\n task_args['flavor_alt_name'] = str(self.flavor_alt.name)\n task_args['glance_image_location'] = ... | [
"0.6930395",
"0.6554711",
"0.61110663",
"0.6043896",
"0.60004854",
"0.5989032",
"0.5976932",
"0.5970796",
"0.59215456",
"0.58704364",
"0.58405006",
"0.57609105",
"0.5728019",
"0.5724147",
"0.5706747",
"0.566341",
"0.5654405",
"0.5582019",
"0.55506146",
"0.5526752",
"0.5480462... | 0.6591516 | 1 |
potentially excessively complicated but very pretty code that makes a topography-inclusive map of the UK within a figure position ('fig1'), given a dataset on an xy grid ('indata'), lon and lat arrays on the same grid ('datlons', 'datlats'), contour levels ('clevs'), a title ('mtitle'), and the correct units ('munits', ... | def uk_map(fig1, indata, clevs, datlons, datlats, mtitle, munits, maskswitch):
from mpl_toolkits import basemap as bm
import matplotlib.cm as cm
from mpl_toolkits.basemap import shiftgrid
from netCDF4 import Dataset
from matplotlib.colors import LightSource
import matplotlib.pyplot as plt
import numpy as np
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def maplot_c(pdata, pdata1, title='', precip='no'):\n from mpl_toolkits.basemap import Basemap, shiftgrid\n import numpy as np\n import matplotlib.pyplot as plt\n from netcdfread import ncread\n lon = ncread('/network/aopp/hera/mad/bakerh/HAPPI/batch_518/atmos/item3236_monthly_mean/item3236_monthly_... | [
"0.67058265",
"0.6691103",
"0.66687226",
"0.65568155",
"0.6486296",
"0.64630854",
"0.62353355",
"0.6190396",
"0.6162545",
"0.60807335",
"0.60436964",
"0.60345477",
"0.6025459",
"0.6024494",
"0.5966506",
"0.5934152",
"0.58613974",
"0.58610916",
"0.5854339",
"0.5782889",
"0.575... | 0.80338895 | 0 |
Loop through the student marking directory. | def mark_students(submitdir):
all_students = os.listdir(submitdir)
all_students.remove("copyToMarking")
for student in all_students:
# construct the path to the individual student's submission
studentdir = submitdir + os.sep + student + os.sep + "marking" + os.sep
# ext... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def iterStamps(self):\n try:\n names = os.listdir(self.path)\n except OSError:\n return\n for name in names:\n if name and name[0] != '.':\n try:\n yield self.stampType(name)\n except:\n pass",... | [
"0.5722948",
"0.5477371",
"0.5338399",
"0.53299177",
"0.5326988",
"0.5218644",
"0.5190301",
"0.5170352",
"0.5155074",
"0.5140579",
"0.5110162",
"0.5099991",
"0.5098159",
"0.50959295",
"0.5093526",
"0.5088655",
"0.5086895",
"0.5079497",
"0.5062495",
"0.50536406",
"0.50320464",... | 0.7576824 | 0 |
This function reads a sequence file in FASTA format and stores it in a dictionary for future manipulation | def read_fasta_to_dictionary(genome_file):
filename = genome_file
dct = {}
id_name = ""
sequence = ""
first_pass = 1
read_fh = open(filename, 'r')
for i, line in enumerate(read_fh):
line = line.rstrip()
if re.search(r'^>(\S+)(\s+)(\S+)(\s+)(\S+)(\s+)(\S+)(\s+)(\S+)(\s+)(\S+... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_fasta(fasta_file):\n\n seq_dict = dict() # Declare a new dictionary\n\n with open(fasta_file,'r') as f:\n lines = f.readlines()\n defline = \"\"\n for li in lines:\n li = li.rstrip() # remove newlines\n if '>' in li:\n defline = li # if i use... | [
"0.80082375",
"0.7946598",
"0.7937227",
"0.7748772",
"0.77080435",
"0.7703732",
"0.76890284",
"0.76189744",
"0.75971746",
"0.75659543",
"0.7555362",
"0.75448287",
"0.74920994",
"0.74830425",
"0.7425911",
"0.74154216",
"0.73772836",
"0.7331254",
"0.7291377",
"0.7243661",
"0.72... | 0.8087481 | 0 |
Update dependencies in the virtualenv. | def update_dependencies():
pip = env.virtualenv.child('bin', 'pip')
reqs = env.code_dir.child('deploy-requirements.txt')
sudo('%s -q install -U pip' % pip)
sudo('%s -q install -r %s' % (pip, reqs)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_project():\n with cd(env.code_dir):\n with _virtualenv():\n run('git pull origin master')\n install_requirements()\n perform_migration()\n collect_static()",
"def update_requirements():\n\n with virtualenv(VIRTUALENV_PATH):\n cmd = ['pip ... | [
"0.7614464",
"0.75438637",
"0.74239606",
"0.7362422",
"0.7184676",
"0.71728396",
"0.709127",
"0.7058197",
"0.70214695",
"0.68973327",
"0.6793373",
"0.6724697",
"0.6512721",
"0.64484334",
"0.6406469",
"0.63691974",
"0.6361465",
"0.6345305",
"0.62930983",
"0.6189988",
"0.617514... | 0.81182396 | 0 |
Copy the production DB locally for testing. | def copy_db():
local('ssh %s pg_dump -U djangoproject -c djangoproject | psql djangoproject' % env.hosts[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy_db():\n with cd(\"/tmp\"), lcd(\"/tmp\"):\n sudo(\"pg_dump gsi > /tmp/latest.sql\", user=\"postgres\")\n run(\"tar zcvf latest.sql.tgz latest.sql\")\n get(\"/tmp/latest.sql.tgz\", \"latest.sql.tgz\")\n sudo(\"rm /tmp/latest.sql.tgz /tmp/latest.sql\")",
"def create_prod_db(... | [
"0.7579767",
"0.73710656",
"0.6812004",
"0.67789894",
"0.6553059",
"0.65024436",
"0.64336926",
"0.64127076",
"0.63445675",
"0.6238479",
"0.6173804",
"0.6080428",
"0.6049761",
"0.6043762",
"0.5998744",
"0.5943636",
"0.5920916",
"0.5919961",
"0.5919428",
"0.5902814",
"0.5893289... | 0.7857592 | 0 |
Copy build docs locally for testing. | def copy_docs():
local('rsync -av --delete --exclude=.svn %s:%s/ /tmp/djangodocs/' %
(env.hosts[0], env.deploy_base.child('docbuilds'))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deploy_nucleondocs():\n\n # Copy generated docs to docs_webserver on target machine\n rsync_project(\n remote_dir= '/srv/docs_webserver/docs/nucleon/',\n local_dir=join(dirname(__file__), 'docs/_build/html/'),\n delete=True)",
"def docs():\n sh('sphinx-build -W -b html docs docs... | [
"0.6975561",
"0.6930959",
"0.6866347",
"0.68147665",
"0.67821646",
"0.6724307",
"0.67216283",
"0.66714007",
"0.65250874",
"0.6519332",
"0.6382937",
"0.63510007",
"0.62922466",
"0.62160635",
"0.6208253",
"0.62051785",
"0.61902285",
"0.61704403",
"0.6158973",
"0.6112555",
"0.59... | 0.78566396 | 0 |
Southify an app remotely. This fakes the initial migration and then migrates forward. Use it the first time you do a deploy on an app that's been newly southified. | def southify(app):
managepy('migrate %s 0001 --fake' % app)
managepy('migrate %s' % app) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deploy():\n\n require('environment', provided_by=env.environments)\n update_source()\n update_requirements()\n mgmt('syncdb', '--migrate')\n restart_supervisor()",
"def _deploy_app():\n rsync_project(env.remote_directory, env.local_directory,\n exclude=['.git/', '*.pyc', 't... | [
"0.6649584",
"0.6453384",
"0.64402455",
"0.64024436",
"0.637955",
"0.6271012",
"0.61587465",
"0.60783327",
"0.605457",
"0.6015112",
"0.6007292",
"0.59989643",
"0.5960935",
"0.595262",
"0.5940856",
"0.59332865",
"0.589287",
"0.5877876",
"0.5836774",
"0.58165526",
"0.57907",
... | 0.70444345 | 0 |
self.nl = self.analyze_folder("NL") self.nl.to_csv(self.folder + "/nl.csv") self.pl = self.analyze_folder("PL") self.pl.to_csv(self.folder + "/pl.csv") self.nt = self.analyze_folder("NT") self.nt.to_csv(self.folder + "/nt.csv") self.pt = self.analyze_folder("PT") self.pt.to_csv(self.folder + "/pt.csv") | def analyze_data(self):
self.truth = self.analyze_folder("Truth")
self.truth.to_csv(self.folder + "/truth.csv")
self.false = self.analyze_folder("False")
self.false.to_csv(self.folder + "/false.csv") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n languages = ['Greek']\n counts = [24]\n dataset = []\n\n for i in range(len(languages)):\n for j in range(1,counts[i]+1):\n if j >= 10:\n charPath = languages[i] + '/character' + str(j)\n else:\n charPath = languages[i] + '/charac... | [
"0.7004485",
"0.68498296",
"0.6444635",
"0.62651443",
"0.6250562",
"0.61392814",
"0.6122473",
"0.6085559",
"0.60484815",
"0.6020855",
"0.5994407",
"0.5986495",
"0.59810555",
"0.5963203",
"0.5939775",
"0.58972245",
"0.58890104",
"0.58844423",
"0.58789444",
"0.5876309",
"0.5867... | 0.7053575 | 0 |
Changes the theme between dark and normal | def dark_theme(self):
if self.actionDark_Theme.isChecked():
QApplication.setStyle(QStyleFactory.create("Fusion"))
palette = QPalette()
palette.setColor(QPalette.Window, QColor(53, 53, 53))
palette.setColor(QPalette.WindowText, Qt.white)
palette.setColo... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dark_mode(grid: bool = False) -> sns.set_theme:\n if grid:\n return sns.set_theme(style=\"darkgrid\")\n return sns.set_theme(style=\"dark\")",
"def dark_mode(app):\n palette = QPalette()\n palette.setColor(QPalette.Window, QColor(30, 30, 30))\n palette.setColor(QPalette.WindowText, QCol... | [
"0.78414094",
"0.74640346",
"0.74385226",
"0.7424977",
"0.73269147",
"0.7200979",
"0.70900446",
"0.6759279",
"0.6744168",
"0.666364",
"0.66361445",
"0.6613044",
"0.65831906",
"0.65280896",
"0.6456559",
"0.6409506",
"0.6269526",
"0.61850554",
"0.61629736",
"0.61629736",
"0.610... | 0.8108005 | 0 |
Check for errors in linkage | def update_linkage_error(links=None):
links = links or Linkage.objects.all()
for idx, link in enumerate(links):
link.error_check(depth=0)
update_task_info('PROGRESS', meta={'current': idx, 'total': len(links)}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_errors(self) -> None:",
"def has_errors_fatal(self) -> bool:",
"def has_errors(self) -> bool:",
"def check_linking(self):\n\n # This one checks if the linking command works out of the box or\n # if any specific flag is required. For example if the linker if the\n # Intel FORTRA... | [
"0.6475067",
"0.6206964",
"0.61585003",
"0.6133443",
"0.60374117",
"0.60219735",
"0.60219735",
"0.59338003",
"0.5918931",
"0.5886056",
"0.58538455",
"0.5843536",
"0.57988787",
"0.5684941",
"0.5683721",
"0.56663525",
"0.56592804",
"0.56536925",
"0.56520957",
"0.5601067",
"0.55... | 0.6460065 | 1 |
Check for errors in workitems | def update_workitem_error(cases=None):
cases = cases or WorkItem.objects.all()
for idx, case in enumerate(cases):
case.error_check(depth=0)
update_task_info(state='PROGRESS', meta={'current': idx, 'total': len(cases)}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_errors(self) -> None:",
"def has_errors(self) -> bool:",
"def check_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")",
"def error_check(command):\r\n\r\n # TODO\r",
"def test_results_error_stacktrace(self, affiliate_items):\n updater = mock.Mock(side_eff... | [
"0.7019857",
"0.64408",
"0.6280564",
"0.6200474",
"0.61952287",
"0.61811614",
"0.6174349",
"0.6168359",
"0.6143135",
"0.613679",
"0.61158603",
"0.61091346",
"0.60561216",
"0.60122675",
"0.60075825",
"0.60042787",
"0.5979714",
"0.5968149",
"0.5967855",
"0.5961485",
"0.5925314"... | 0.65584564 | 1 |
Clean all models except Error. | def clean_all_db():
for model in [
Component, Arch, AutoCase, AutoCaseFailure, Bug, Linkage, WorkItem,
Document, Project, Framework]:
model.objects.all().delete() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_models(self):\n # TODO: Add the exclude parameter in the signature of the method.\n # Call full_clean with ``exclude``, so that we can exclude any models\n # fields we want from the validation.\n for element in isinstance(self.request.data, self.model) \\\n and ... | [
"0.74855083",
"0.67072684",
"0.67072684",
"0.6591304",
"0.6566097",
"0.6535888",
"0.6350453",
"0.62663954",
"0.62663954",
"0.62663954",
"0.624777",
"0.6182531",
"0.6175318",
"0.61041176",
"0.61041176",
"0.6100422",
"0.60844874",
"0.6070654",
"0.60535806",
"0.6028027",
"0.6025... | 0.6942854 | 1 |
Tests geometry access methods of the class GeometryAccess. | def test_access(geometry):
geometry.print_list_of_geos()
geometry.print_list_of_geos_children()
logger.info('TOP GEO:')
top_geo = geometry.get_top_geo()
top_geo.print_geo_children()
logger.info('INTERMEDIATE GEO (QUAD):')
geo = geometry.get_geo('QUAD:V1', 0)
#geo = geometry.get_top_geo... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_02_get_geometry_collection_details(self):\n geometry_collection = GeometryCollection(**self.test_data)\n geometry_collection.save()\n response = self.client.get('/api/v1/collection/%s/' % geometry_collection.pk)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"... | [
"0.6327447",
"0.62089115",
"0.61037767",
"0.60280645",
"0.59578305",
"0.58143765",
"0.5794815",
"0.5771627",
"0.57589114",
"0.5756427",
"0.5716527",
"0.56913483",
"0.56528866",
"0.5630486",
"0.5605773",
"0.55709183",
"0.55142444",
"0.54942834",
"0.5490078",
"0.5443508",
"0.54... | 0.7388955 | 0 |
Test cspad2x2 geometry table. | def test_cspad2x2():
basedir = '/reg/g/psdm/detector/alignment/cspad2x2/calib-cspad2x2-01-2013-02-13/'
fname_geometry = basedir + 'calib/CsPad2x2::CalibV1/MecTargetChamber.0:Cspad2x2.1/geometry/0-end.data'
fname_data = basedir + 'cspad2x2.1-ndarr-ave-meca6113-r0028.dat'
geometry = GeometryAccess(fn... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_access(geometry):\n geometry.print_list_of_geos()\n geometry.print_list_of_geos_children()\n\n logger.info('TOP GEO:')\n top_geo = geometry.get_top_geo()\n top_geo.print_geo_children()\n\n logger.info('INTERMEDIATE GEO (QUAD):')\n geo = geometry.get_geo('QUAD:V1', 0)\n #geo = geome... | [
"0.63995576",
"0.6172929",
"0.59261703",
"0.5890235",
"0.5848824",
"0.5845203",
"0.578988",
"0.57143784",
"0.56967634",
"0.5629314",
"0.56118125",
"0.56068027",
"0.5600486",
"0.55879253",
"0.5568533",
"0.5525954",
"0.5519003",
"0.54754615",
"0.546403",
"0.54613346",
"0.545419... | 0.63004273 | 1 |
Updates the state with new info from the packet. packet: Packet with info to update from. | def update(self, packet):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setPacket(self, packet):\n\t\tself.clear()\n\t\tself.packet = packet\n\t\t\n\t\tfields = self.fields\n\t\t\n\t\tfields.append(['Reception time', '%s:%s:%s.%s' % tuple(packet.time), None])\n\t\t\n\t\tif self.packet.isInvalid:\n\t\t\treturn\n\t\t\n\t\tfields.append(['Transmission info', 'CRC passed: %s, LQI: %s... | [
"0.6572827",
"0.6166308",
"0.5936583",
"0.59047705",
"0.5738875",
"0.5675512",
"0.56577075",
"0.56325054",
"0.56124914",
"0.5608003",
"0.55850834",
"0.5570383",
"0.5556657",
"0.5554677",
"0.55363494",
"0.55109787",
"0.5457843",
"0.5445314",
"0.5434475",
"0.53559756",
"0.53285... | 0.7768925 | 0 |
Override _search to order the results according to some employee. The order is the following: limit (limited leaves first, such as Legal Leaves), then virtual remaining leaves (higher is better, so using reverse on sorted). This override is necessary because those fields are not stored and depend on an employee_id given in ... | def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
leave_ids = super (HolidaysType, self)._search (args, offset=offset, limit=limit, order=order, count=count,
access_rights_uid=access_rights_uid)
if not count and not orde... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):\n if context is None:\n context = {}\n if 'emp_hours' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('emp.luggage_transfer.hours'),\n ... | [
"0.64379",
"0.57039595",
"0.52205575",
"0.51754266",
"0.5063366",
"0.5011705",
"0.5009542",
"0.49143377",
"0.48910806",
"0.4789285",
"0.4787127",
"0.47528285",
"0.47489092",
"0.46934026",
"0.4679221",
"0.4671263",
"0.46634984",
"0.4649555",
"0.4624436",
"0.46154812",
"0.46114... | 0.64316547 | 1 |
If no date is set for date_to, automatically set it one day later than date_from. Also update the number_of_days. | def _onchange_date_from(self):
date_from = self.date_from
date_to = self.date_to
self.compute_valid_leaves_for_employee(date_from, date_to)
# policy_id = self.env['leaves.policy'].sudo().search(
# [('leave_type', '=', self.holiday_status_id.id), ('company_id', '=', self.env.user.company_id.id)])
# if date... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def date_to(self, date_to):\n\n self._date_to = date_to",
"def _set_days_until_triage(self):\n if self.sla_triaged_at:\n btd = dates.businesstimedelta(self.created_at, self.sla_triaged_at)\n self.days_until_triage = btd.days\n else:\n self.days_until_triage =... | [
"0.6600826",
"0.62123364",
"0.60784036",
"0.6028921",
"0.6023509",
"0.58135504",
"0.5699685",
"0.56320995",
"0.5561258",
"0.55328417",
"0.55286765",
"0.551658",
"0.55129164",
"0.5464243",
"0.5361167",
"0.5355377",
"0.5281642",
"0.5279441",
"0.5274924",
"0.5264698",
"0.5257835... | 0.6646776 | 0 |
This method creates an entry in the resource calendar leaves object when holidays are validated | def _create_resource_leave(self):
for leave in self:
self.env['resource.calendar.leaves'].create ({
'name': leave.name,
'date_from': leave.date_from,
'holiday_id': leave.id,
'date_to': leave.date_to,
'resource_id': leave.employee_id.resource_id.id,
'calendar_id': leave.employee_id.resource_... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_leave_request(self):\n\t\tfor holiday in self.filtered (lambda request: request.type == 'remove' and request.holiday_type == 'employee'):\n\t\t\tmeeting_values = holiday._prepare_holidays_meeting_values ()\n\t\t\tmeeting = self.env['calendar.event'].with_context (no_mail_to_attendees=True).create (me... | [
"0.7653445",
"0.7196214",
"0.65434945",
"0.6539993",
"0.63767076",
"0.6295288",
"0.6260696",
"0.6255377",
"0.62113553",
"0.6104574",
"0.6042434",
"0.58993787",
"0.5892861",
"0.58624077",
"0.5775199",
"0.5663149",
"0.56233764",
"0.56051123",
"0.5601992",
"0.5562575",
"0.554199... | 0.7792591 | 0 |
Validate leave requests (holiday_type='employee' and holiday.type='remove') by creating a calendar event and resource leaves. | def _validate_leave_request(self):
for holiday in self.filtered (lambda request: request.type == 'remove' and request.holiday_type == 'employee'):
meeting_values = holiday._prepare_holidays_meeting_values ()
meeting = self.env['calendar.event'].with_context (no_mail_to_attendees=True).create (meeting_values)
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_resource_leave(self):\n\t\tfor leave in self:\n\t\t\tself.env['resource.calendar.leaves'].create ({\n\t\t\t\t'name': leave.name,\n\t\t\t\t'date_from': leave.date_from,\n\t\t\t\t'holiday_id': leave.id,\n\t\t\t\t'date_to': leave.date_to,\n\t\t\t\t'resource_id': leave.employee_id.resource_id.id,\n\t\t\t\t... | [
"0.76563054",
"0.6980905",
"0.6831349",
"0.65803087",
"0.65579015",
"0.65431947",
"0.6317081",
"0.6314969",
"0.62980187",
"0.60757554",
"0.59903985",
"0.58735424",
"0.5798595",
"0.5751281",
"0.5667299",
"0.5629215",
"0.5600948",
"0.55589676",
"0.55558693",
"0.5510392",
"0.548... | 0.87582946 | 0 |
Handle HR user and officer recipients that can validate or refuse holidays directly from email. | def _notification_recipients(self, message, groups):
groups = super (Holidays, self)._notification_recipients (message, groups)
self.ensure_one ()
hr_actions = []
if self.state == 'confirm':
app_action = self._notification_link_helper ('controller', controller='/hr_holidays/validate')
hr_actions += [{'ur... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mail_responder(event, _):\n logger.info('%s: Request received:%s', __name__,\n str(event['Records'][0]['eventSource']))\n\n try:\n (source_email, recipient) = parse_ses_notification(\n event['Records'][0]['ses'])\n except Exception:\n logger.error('Error parsing... | [
"0.5860881",
"0.5794972",
"0.5757674",
"0.5723443",
"0.5606513",
"0.5556952",
"0.5550611",
"0.5537931",
"0.5508338",
"0.5471315",
"0.53628576",
"0.53263485",
"0.53258735",
"0.53146553",
"0.52828825",
"0.52389985",
"0.5191454",
"0.51899254",
"0.5173753",
"0.5155635",
"0.515328... | 0.59531474 | 0 |
Skips tokens until we hit the token with value 'find' | def skip_until(self, find, consume=False):
# TODO: handle scanner errors, EOF
if isinstance(find, str):
find = [find]
while self.token.value not in find:
self.token = next(self.tokens)
if consume:
self.token = next(self.tokens)
# always con... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def skip(self, position):\n if not self._is_position_in_corpus(position):\n raise ValueError('{0} not in corpus'.format(position))\n\n self._staged_next = None\n self._has_staged = False\n self._next_search_pos = position",
"def test_find_token_no_matches(self, token_re):\n... | [
"0.5658785",
"0.56327987",
"0.5598761",
"0.5581111",
"0.5537023",
"0.5429428",
"0.53949994",
"0.5328356",
"0.52932805",
"0.52559245",
"0.52536815",
"0.5205816",
"0.5193714",
"0.51935846",
"0.517538",
"0.5172944",
"0.50992453",
"0.50889593",
"0.50706685",
"0.50678885",
"0.4990... | 0.7885091 | 0 |
Returns a list of symbol names in the current scope | def cur_symbols(self):
return self.symbols[-1].keys() + self.global_symbols.keys() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_symbol(self):\n return []",
"def get_symbols(self, type_name):\n return self._symtab[type_name].get_symbols()",
"def get_symbols_list(self):\n return self.symbols_list",
"def list_syms():\n\tSymStringVec=[];\n\tSymStringVec.append(\"CSYM\");\n\tSymStringVec.append(\"DSYM\");\n\tSymSt... | [
"0.74625385",
"0.72063017",
"0.7173719",
"0.69365084",
"0.6891114",
"0.68857193",
"0.6881177",
"0.6853945",
"0.6763958",
"0.666098",
"0.65792805",
"0.6552155",
"0.6484818",
"0.6465965",
"0.6334895",
"0.6305106",
"0.6294767",
"0.6293096",
"0.6277566",
"0.6261958",
"0.6206069",... | 0.72822034 | 1 |
Returns the size in bytes of the local parameters | def local_param_size(self):
size = 0
for s in self.symbols[-1]:
if self.symbols[-1][s].type == 'procedure': continue
if not self.symbols[-1][s].isparam: continue
size += 1
return size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def local_symbols_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if self.symbols[-1][s].isparam: continue\n #if self.symbols[-1][s].isparam: continue\n size += self.symbols[-1][s].size\n r... | [
"0.759114",
"0.702782",
"0.70264846",
"0.69344485",
"0.69227624",
"0.68348753",
"0.68206537",
"0.6802461",
"0.67675924",
"0.6758175",
"0.6757479",
"0.6746264",
"0.6734",
"0.6717889",
"0.6693805",
"0.66817945",
"0.66817945",
"0.66736794",
"0.6670453",
"0.6669982",
"0.66659766"... | 0.8453446 | 0 |
Returns the size in bytes of the local symbols | def local_symbols_size(self):
size = 0
for s in self.symbols[-1]:
if self.symbols[-1][s].type == 'procedure': continue
if self.symbols[-1][s].isparam: continue
#if self.symbols[-1][s].isparam: continue
size += self.symbols[-1][s].size
return size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def global_symbols_size(self):\n size = 0\n for s in self.global_symbols:\n if self.global_symbols[s].type == 'procedure': continue\n size += self.global_symbols[s].size\n return size",
"def expected_size(self):\n return self.nsym * self.symbol_len_per_byte",
"... | [
"0.78402543",
"0.74415696",
"0.7194053",
"0.67470306",
"0.6624587",
"0.63827235",
"0.6340208",
"0.6319925",
"0.6165855",
"0.609423",
"0.60747105",
"0.6067792",
"0.6066112",
"0.6058626",
"0.6032693",
"0.602242",
"0.60105544",
"0.6000291",
"0.59911376",
"0.5981964",
"0.594508",... | 0.85582393 | 0 |
Returns the size in bytes of the global symbols | def global_symbols_size(self):
size = 0
for s in self.global_symbols:
if self.global_symbols[s].type == 'procedure': continue
size += self.global_symbols[s].size
return size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def local_symbols_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if self.symbols[-1][s].isparam: continue\n #if self.symbols[-1][s].isparam: continue\n size += self.symbols[-1][s].size\n r... | [
"0.7904729",
"0.77030617",
"0.7503799",
"0.7279682",
"0.6880621",
"0.6663535",
"0.6554453",
"0.65205085",
"0.64663875",
"0.6297727",
"0.6290106",
"0.6283424",
"0.6266394",
"0.61969995",
"0.61902064",
"0.61764604",
"0.6048911",
"0.604705",
"0.6027367",
"0.6023816",
"0.60055757... | 0.87714946 | 0 |
Get account balance for address at a given block number | def get_balance_by_block(address, block_num, endpoint=_default_endpoint, timeout=_default_timeout) -> int:
method = 'hmy_getBalanceByBlockNumber'
params = [
address,
str(hex(block_num))
]
balance = rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']
try:
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_balance(self, address):\n balance = 0\n for block in self.chain:\n for t in block['transactions']:\n if t['recipient'] == address:\n balance += t['amount']\n elif t['sender'] == address:\n balance -= t['amount']\n ... | [
"0.7753831",
"0.76965743",
"0.75815",
"0.7496749",
"0.7283361",
"0.7160326",
"0.713291",
"0.6988025",
"0.6978337",
"0.6957172",
"0.69118226",
"0.6904677",
"0.68739074",
"0.68685806",
"0.6836331",
"0.6831155",
"0.6781547",
"0.6766959",
"0.67386377",
"0.65896213",
"0.655743",
... | 0.7808565 | 0 |
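The helper above wraps a JSON-RPC call; a generic sketch of such a request with the requests library (the endpoint, address, and block number in the usage comment are placeholders):

    import requests

    def rpc_request(method, params, endpoint, timeout=30):
        payload = {"jsonrpc": "2.0", "id": 1, "method": method, "params": params}
        resp = requests.post(endpoint, json=payload, timeout=timeout)
        resp.raise_for_status()
        return resp.json()

    # Hypothetical usage against a local node:
    # rpc_request("hmy_getBalanceByBlockNumber", ["one1...", hex(12345)],
    #             "http://localhost:9500")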
Get current account balance in all shards & optionally report errors getting account balance for a shard | def get_balance_on_all_shards(address, skip_error=True, endpoint=_default_endpoint, timeout=_default_timeout) -> list:
balances = []
sharding_structure = get_sharding_structure(endpoint=endpoint, timeout=timeout)
for shard in sharding_structure:
try:
balances.append({
'sh... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_balance(self):\n r = requests.get(build_api_call(self.base_url, None, 'balance', ''), auth=HTTPBasicAuth(KEY, SECRET))\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'",
"def _balances(self) -> Dict[str, int]:\n\n return self.client.g... | [
"0.666637",
"0.6562417",
"0.649321",
"0.63970065",
"0.6385605",
"0.6329423",
"0.6321331",
"0.6281901",
"0.6257391",
"0.6218982",
"0.6182547",
"0.6173757",
"0.61105436",
"0.6097671",
"0.6081987",
"0.60235274",
"0.6020045",
"0.59833217",
"0.59707737",
"0.5962612",
"0.5960762",
... | 0.6737994 | 0 |
Write the converted routes to the configuration file. | def write_routes(self, pod, collection):
routes_file = os.path.join(collection.pod_path, ROUTES_FILENAME)
if self.data['routes']:
print(' └─ Writing: {}'.format(routes_file))
print('')
output = yaml.dump(
self.data, Dumper=yaml_utils.PlainTextYamlDump... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_routes(output_dir: str, routes: List[Dict[str, Any]]):\n\n routes_file = ROUTES_FILE.format(output_dir=output_dir)\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n with open(routes_file, 'w') as f:\n logging.info(f'Wrote {len(routes)} routes to {routes_file}.')\n ... | [
"0.6734417",
"0.64529943",
"0.6035774",
"0.5861394",
"0.58209974",
"0.58195937",
"0.5759961",
"0.5752589",
"0.5728276",
"0.56543946",
"0.5629924",
"0.5625839",
"0.5625839",
"0.56163245",
"0.5613927",
"0.55870634",
"0.5579713",
"0.55711526",
"0.55490965",
"0.55257416",
"0.5522... | 0.66468537 | 1 |
Perform the conversion to use collection based routing. | def convert(self):
print('Converting: {}'.format(self.collection.pod_path))
# Pull out the meta information from all the docs.
sorted_docs = sorted(self.collection.list_docs_unread(), key=lambda doc: doc.pod_path)
for doc in sorted_docs:
self.routes_data.extract_doc(doc)
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normalise(self) -> \"Route\":\n pass",
"def _do_mapping(self):\n pass",
"def route(self):\n pass",
"def _translate_to_collection(\n self,\n collection,\n recursive=False,\n run_conditions=[],\n resource_conditions=[],\n variety_conditions... | [
"0.5987358",
"0.54556525",
"0.5441375",
"0.53992254",
"0.5345253",
"0.5340782",
"0.5335203",
"0.5289516",
"0.5278404",
"0.523436",
"0.5180618",
"0.51348835",
"0.5123802",
"0.510506",
"0.5033277",
"0.50302935",
"0.50169706",
"0.5013713",
"0.5008044",
"0.49894407",
"0.49573487"... | 0.6795894 | 0 |
Handle user signup. Create new user and add to DB. Redirect to home page. If form not valid, present form. | def signup():
form = UserAddForm()
if form.validate_on_submit():
try:
user = User.signup(
username=form.username.data,
password=form.password.data,
)
db.session.commit()
except IntegrityError:
flash("Username alre... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sign_up():\n form = RegisterForm()\n if request.method == \"GET\":\n return render_template('adduser.html', title='Add New User', form=form)\n if request.method == 'POST' and form.validate_on_submit():\n username = form.username.data\n password = form.password1.data\n email... | [
"0.84403425",
"0.8225739",
"0.8176397",
"0.8168092",
"0.8141075",
"0.81184983",
"0.8070323",
"0.8016985",
"0.80074",
"0.7969208",
"0.79564214",
"0.79262125",
"0.7865015",
"0.7858723",
"0.7858716",
"0.77863723",
"0.7768571",
"0.7757153",
"0.77525586",
"0.76977766",
"0.76679504... | 0.8322984 | 1 |
Returns the probability of transitioning into state s1 after taking action a in state s. | def _transition_probability(self, s, a, s1):
unreachable_states = [4, # F with prod_score == 4
5] # M with prod_score == 0
if s1 in unreachable_states:
return 0
else:
return 1 / (self.n_states - len(unreachable_states)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_transition_prob(self, state, action, next_state):\n return self.get_next_states(state, action).get(next_state, 0.0)",
"def _transition_prob(self, s1, a, s2):\n if self._stochsatic:\n return self._dynamics_noise_distribution.pdf(s2 - (s1 + a))\n else:\n return s2... | [
"0.74151826",
"0.74038136",
"0.7240678",
"0.66807497",
"0.6644237",
"0.65863276",
"0.63309807",
"0.63219106",
"0.6308571",
"0.62739587",
"0.62624675",
"0.6239656",
"0.62317383",
"0.62011445",
"0.615554",
"0.6150811",
"0.61338043",
"0.61216384",
"0.61120576",
"0.6089495",
"0.6... | 0.8406897 | 0 |
Obtain the observation for the current state of the environment. This is a fully observable environment, so we can return the state directly. Returns: list | def _get_obs(self):
return self.observation_function[self.cur_state] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_state(self):\r\n return self.currentObservation",
"def getObservation(self):\n return self._cur_state",
"def get_observation_list(self):\n return self.observations",
"def current_state(self):\n return self.obs_hook(self._current_obs)",
"def _get_observation(self):\n r... | [
"0.73571426",
"0.7285132",
"0.71541566",
"0.7076889",
"0.7041503",
"0.6926391",
"0.6779948",
"0.67035097",
"0.668648",
"0.66071546",
"0.6600513",
"0.6579601",
"0.65328586",
"0.644826",
"0.6435689",
"0.6430787",
"0.64077073",
"0.6407079",
"0.6355819",
"0.63506186",
"0.6345424"... | 0.78329116 | 0 |
Takes as input data the reads from a single (precomputed) block and the genotypes for all variants inside the block. Runs a three-phase algorithm to compute a phasing for this isolated | def phase_single_block(block_id, allele_matrix, genotypes, prephasing, param, timers, quiet=False):
block_num_vars = allele_matrix.getNumPositions()
# Check for singleton blocks and handle them differently (for efficiency reasons)
if block_num_vars == 1:
# construct trivial solution for singleton... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_phaseg(locus_file, gam_file, vg_file, canu_alignments, true_haps):\n\trecombrate=1.26\n\tmax_coverage = 15\n\tall_heterozygous = False\n\tdistrust_genotypes = True\n\twith ExitStack() as stack:\n\t\tnode_seq_list, edge_connections = vg_graph_reader(vg_file)\n\t\tall_reads, alleles_per_pos, locus_branch_map... | [
"0.592895",
"0.5765463",
"0.55587965",
"0.5380478",
"0.53757083",
"0.5369531",
"0.5328402",
"0.5295265",
"0.5274195",
"0.52280647",
"0.51929116",
"0.5161996",
"0.51604706",
"0.51590586",
"0.5144043",
"0.51424474",
"0.51381207",
"0.51356965",
"0.51342386",
"0.51281947",
"0.511... | 0.58804005 | 1 |
Wrapper for the phase_single_block() function. Carries a block_id through to the results. Creates a local submatrix without modifying the given allele matrix | def phase_single_block_mt(
allele_matrix,
partial_phasing,
block_id,
start,
end,
genotype_slice,
param,
timers,
job_id,
num_blocks,
quiet=False,
):
submatrix = allele_matrix.extractInterval(start, end)
subphasing = partial_phasing.extractInterval(start, end) if partia... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def phase_single_block(block_id, allele_matrix, genotypes, prephasing, param, timers, quiet=False):\n\n block_num_vars = allele_matrix.getNumPositions()\n\n # Check for singleton blocks and handle them differently (for efficiency reasons)\n if block_num_vars == 1:\n\n # construct trivial solution f... | [
"0.7084021",
"0.5748252",
"0.5678491",
"0.5542582",
"0.5512303",
"0.55118346",
"0.54758036",
"0.5363573",
"0.5286815",
"0.5286382",
"0.5283284",
"0.5243682",
"0.5229807",
"0.5212749",
"0.5199913",
"0.5186671",
"0.5162968",
"0.51537496",
"0.5136429",
"0.5083988",
"0.5079707",
... | 0.72714204 | 0 |
Collects all blockwise phasing results and aggregates them into one list for each type of information. Local ids and indices are converted to global ones in this step. | def aggregate_results(results: List[PolyphaseBlockResult], ploidy: int, borders: List[int]):
clustering, threads, breakpoints = [], [], []
haplotypes = [[] for _ in range(ploidy)]
cid_offset, pos_offset = 0, 0
for r in results:
clustering += [clust for clust in r.clustering]
threads += [... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _collect_all(self):",
"def calc_data(self):\n\n circ_counts = {}\n for trialidx in range(self._ntrials):\n for _, depth in enumerate(self._depths):\n circ_name = 'qv_depth_%d_trial_%d' % (depth, trialidx)\n\n # get the counts form ALL executed circuits\n... | [
"0.56452453",
"0.55284214",
"0.5465167",
"0.539003",
"0.5384237",
"0.5365769",
"0.5345599",
"0.53302294",
"0.5316398",
"0.52856123",
"0.52655923",
"0.5250718",
"0.5248248",
"0.524049",
"0.52215445",
"0.52090776",
"0.51985794",
"0.5186276",
"0.5150495",
"0.5137165",
"0.5135544... | 0.57805455 | 0 |
Computes the cut positions for phasing blocks, based on the computed breakpoints of the reordering stage and the requested block cut sensitivity. | def compute_cut_positions(
breakpoints: List[PhaseBreakpoint], ploidy: int, block_cut_sensitivity: int
):
cuts = []
hap_cuts = [[] for _ in range(ploidy)]
thresholds = [-float("inf"), -float("inf"), log(0.5), log(0.5), log(0.99), 0]
thresholds_num = [ploidy, ploidy, min(ploidy, 3), 2, 2, 0]
thr... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def determine_cutting_frames(pos_data):\n def get_pts(dat):\n \"\"\" short-cut function to determine when state in dat changes\n Args:\n A numpy vector\n Returns:\n Boolean vector\n \"\"\"\n return np.where(np.abs(np.diff(dat)) > 0)[0]+1\n... | [
"0.5430976",
"0.5360118",
"0.5335822",
"0.52487403",
"0.5235013",
"0.5226283",
"0.516665",
"0.5099744",
"0.50918156",
"0.5086627",
"0.50657815",
"0.5052516",
"0.497971",
"0.49528226",
"0.49450764",
"0.4940331",
"0.4931173",
"0.49244288",
"0.49213406",
"0.49072263",
"0.4899205... | 0.70449966 | 0 |
Given a pattern dataframe and UCSC region string, retrieve only patterns in that region. | def subset_region(df, region):
# Split the region string at each occurence of - or : (yields 3 elements)
chrom, start, end = re.split("[-:]", region)
start, end = int(start), int(end)
# Only keep patterns on the same chromosome as the region and
# within the start-end interval
subset = df.loc[
... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pattern_search(pattern, dataset, column):\n # Filter\n dataset = dataset[dataset[column].str.contains(pattern, regex=True)]\n # Reset index\n dataset = dataset.reset_index(drop=True)\n # Return\n return dataset",
"def select_regions(data, region_col, regions, combine_subregions=True):",
"... | [
"0.5442019",
"0.5398551",
"0.52937627",
"0.5278732",
"0.511519",
"0.508612",
"0.49498475",
"0.48897955",
"0.4878237",
"0.48184252",
"0.48183033",
"0.48031545",
"0.47911888",
"0.47765893",
"0.4776433",
"0.4775891",
"0.47595215",
"0.4746451",
"0.47112608",
"0.469774",
"0.469161... | 0.6566416 | 0 |
Given a pyGSTi model generate a minimal generating set of gate sequences by taking a union of the fiducials and the germs. If basic gate macros are given, generate a macro for each gate sequence as well. | def base_gate_sequence_and_macros(model, basic_gates_macros: dict = None):
prep_fiducials, meas_fiducials, germs = (
model.prep_fiducials(),
model.meas_fiducials(),
model.germs(),
)
# create minimal generating gate sequence
base_gate_sequence = list({k.str.split("@")[0] for k in... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_seq_graph(sequence, macros, connects):\n seq_graph = nx.Graph()\n for idx, macro_name in enumerate(sequence):\n sub_graph = macros[macro_name].gen_graph()\n\n nx.set_node_attributes(\n sub_graph, {node: idx for node in sub_graph.nodes}, \"seqid\")\n seq_graph = nx... | [
"0.54505414",
"0.51954615",
"0.51067597",
"0.50813454",
"0.5022131",
"0.49704906",
"0.4932094",
"0.49202523",
"0.48829344",
"0.4879901",
"0.48722965",
"0.4869388",
"0.48138118",
"0.48023358",
"0.47683543",
"0.4765257",
"0.47583735",
"0.47547734",
"0.47535294",
"0.47480232",
"... | 0.7839262 | 0 |
Generate a single macro from a list of macros | def sequence_macros(macros):
def foo():
for m in macros:
m()
return foo | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_macros(self, text):\n if text is None:\n return\n \n # join and split so we can accept a list or string. \n text = ''.join(text)\n for m in text.splitlines():\n name, body = m.split(None, 1)\n name, args = name.split('(', 1)\n ... | [
"0.6791655",
"0.6284933",
"0.6263033",
"0.61974406",
"0.6181545",
"0.6170639",
"0.603242",
"0.60219055",
"0.6006982",
"0.5983674",
"0.593645",
"0.5891278",
"0.5878313",
"0.5833065",
"0.58317727",
"0.58231497",
"0.5785763",
"0.56771237",
"0.5628757",
"0.5620599",
"0.5515615",
... | 0.7120662 | 0 |
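Usage sketch of the closure-composition idea above: each macro is a zero-argument callable and the combined macro runs them in order (the step names are made up):

    def sequence_macros(macros):
        def combined():
            for m in macros:
                m()
        return combined

    steps = [lambda: print("home"), lambda: print("measure"), lambda: print("reset")]
    run_all = sequence_macros(steps)
    run_all()  # prints home, measure, reset in order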
Goes depth first through all possible moves until a solution is found or the maximum depth has been reached. | def run(self, max_depth):
while len(self.stack) > 0:
state = self.get_next_state()
if state.is_solution():
self.solutions.append(state.moves)
if len(state.moves) < max_depth:
self.create_children(state)
self.archive[state.get... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solve(problem):\n\n # *** YOUR CODE HERE ***\n\n # The core of Iterative Deepening Search are iterations of Depth Limited\n # Search with given increasing depth.\n\n # A recursive version of Depth Limited Search\n def depth_limited_search(problem, limit):\n \"\"\"\n Return a list o... | [
"0.7213512",
"0.7089381",
"0.6998385",
"0.6994788",
"0.6951225",
"0.6930633",
"0.6860129",
"0.6857788",
"0.6846639",
"0.6840226",
"0.6836247",
"0.683497",
"0.68027216",
"0.67122006",
"0.668429",
"0.6641582",
"0.6599496",
"0.65836287",
"0.65786326",
"0.65759915",
"0.6541374",
... | 0.77762467 | 0 |
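A compact standalone sketch of the same stack-based depth-first loop, on a toy problem (reach a target sum with moves of 1 or 2); the set of visited states mirrors the snippet's archive check:

    def depth_first(start, is_solution, children, max_depth):
        solutions, stack, seen = [], [start], set()
        while stack:
            moves = stack.pop()
            if is_solution(moves):
                solutions.append(moves)
            if len(moves) < max_depth:
                for step in children(moves):
                    key = tuple(step)
                    if key not in seen:
                        seen.add(key)
                        stack.append(step)
        return solutions

    sols = depth_first([], lambda m: sum(m) == 3,
                       lambda m: [m + [1], m + [2]] if sum(m) < 3 else [],
                       max_depth=4)
    print(sols)  # [[2, 1], [1, 2], [1, 1, 1]] in some order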
Call get_hike_distance on each hike for each weather station. Adds columns to df_hike. Input parameters: | def get_closest_station(df_hike, df_weather):
closest_station = []
station_distance = []
for hike_idx in df_hike.index:
hike_long = df_hike.loc[hike_idx, 'long']
hike_lat = df_hike.loc[hike_idx, 'lat']
distances = []
for stat_idx in df_weather.index:
stat_long = d... | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_distance_features(df_kek):\n df = pd.DataFrame([])\n df['distance'] = get_distance_vector(df_kek, 'latitude', 'longitude', 'del_latitude', 'del_longitude')\n df['distance_dest_from_center'] = get_distance_vector(df_kek, 'center_latitude', 'center_longitude',\n ... | [
"0.59621465",
"0.56795853",
"0.56663597",
"0.55515057",
"0.54293764",
"0.5317513",
"0.5096402",
"0.5072886",
"0.49965146",
"0.49763572",
"0.4970515",
"0.49424827",
"0.4929317",
"0.4915355",
"0.48880303",
"0.48811126",
"0.4879275",
"0.4869938",
"0.48475063",
"0.48345596",
"0.4... | 0.69872105 | 0 |