query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Test the popxl mnist with replication example
def test_documentation_popxl_mnist_replication_train(self): filename = "mnist_rts.py --replication-factor 2" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n ...
[ "0.664523", "0.6479431", "0.6476196", "0.64748347", "0.64239633", "0.64103997", "0.6363367", "0.6277", "0.62729543", "0.61764646", "0.60306984", "0.596759", "0.5963632", "0.5930057", "0.58732194", "0.58112574", "0.58030224", "0.5776231", "0.57753116", "0.56806064", "0.5629193...
0.7531117
0
Test the popxl mnist with RTS example
def test_documentation_popxl_mnist_rts_train(self): filename = "mnist_rts.py --replication-factor 2 --rts" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = m...
[ "0.69393843", "0.6572477", "0.6475336", "0.6470662", "0.63731503", "0.6351866", "0.62574834", "0.62303245", "0.6214079", "0.61291355", "0.6029285", "0.6016668", "0.601599", "0.6014156", "0.59936774", "0.5969918", "0.595324", "0.5940546", "0.59249955", "0.5917552", "0.58708", ...
0.6889731
1
Test the popxl mnist with RTS example
def test_documentation_popxl_mnist_rts_train_test(self): filename = "mnist_rts.py --replication-factor 2 --rts --test" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_t...
[ "0.6889731", "0.6572477", "0.6475336", "0.6470662", "0.63731503", "0.6351866", "0.62574834", "0.62303245", "0.6214079", "0.61291355", "0.6029285", "0.6016668", "0.601599", "0.6014156", "0.59936774", "0.5969918", "0.595324", "0.5940546", "0.59249955", "0.5917552", "0.58708", ...
0.69393843
0
MigrateListingResponse a model defined in Swagger
def __init__(self, errors=None, inventory_item_group_key=None, inventory_items=None, listing_id=None, marketplace_id=None, status_code=None, warnings=None): # noqa: E501 # noqa: E501 self._errors = None self._inventory_item_group_key = None self._inventory_items = None self._listing_id = None self._marketplace_id = None self._status_code = None self._warnings = None self.discriminator = None if errors is not None: self.errors = errors if inventory_item_group_key is not None: self.inventory_item_group_key = inventory_item_group_key if inventory_items is not None: self.inventory_items = inventory_items if listing_id is not None: self.listing_id = listing_id if marketplace_id is not None: self.marketplace_id = marketplace_id if status_code is not None: self.status_code = status_code if warnings is not None: self.warnings = warnings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_response_paginated(paginator: PaginationBase, op: Operation) -> None:\n status_code, item_schema = _find_collection_response(op)\n\n # Switching schema to Output schema\n try:\n new_name = f\"Paged{item_schema.__name__}\"\n except AttributeError:\n new_name = f\"Paged{str(item_sc...
[ "0.6088739", "0.60347897", "0.5947383", "0.5903535", "0.5835085", "0.5733381", "0.5700714", "0.56890315", "0.563407", "0.563407", "0.563407", "0.56306905", "0.56293565", "0.5624523", "0.55867976", "0.5500636", "0.5498303", "0.5498303", "0.54847574", "0.54818034", "0.5460438",...
0.0
-1
Sets the errors of this MigrateListingResponse.
def errors(self, errors): self._errors = errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errors(self, errors):\n \n self._errors = errors", "def validation_errors(self, validation_errors):\n self._validation_errors = validation_errors", "def errors(self) -> pulumi.Output[Sequence['outputs.BatchAIErrorResponse']]:\n return pulumi.get(self, \"errors\")", "def errors...
[ "0.6449945", "0.586166", "0.5679023", "0.56335896", "0.5626413", "0.56153715", "0.56153715", "0.55020136", "0.54732645", "0.5438516", "0.5403028", "0.53804886", "0.5344446", "0.53443104", "0.532449", "0.5315556", "0.5290784", "0.52629125", "0.52527165", "0.52467006", "0.52452...
0.63594306
1
Sets the inventory_item_group_key of this MigrateListingResponse.
def inventory_item_group_key(self, inventory_item_group_key): self._inventory_item_group_key = inventory_item_group_key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def with_group_key(self, group_key):\n self.group_key = group_key\n return self", "def add_inventory_group(self, key):\n host_dict = {'hosts': [], 'vars': {}}\n self.inventory[key] = host_dict\n return", "def group_id(self, group_id):\n\n self._group_id = group_id", ...
[ "0.61608106", "0.5747907", "0.5501611", "0.5501611", "0.5501611", "0.5501611", "0.5501611", "0.5501611", "0.54646176", "0.5375903", "0.53397626", "0.53107274", "0.53079605", "0.53079605", "0.53079605", "0.5276046", "0.5275524", "0.5234625", "0.51415646", "0.5023584", "0.50226...
0.8215857
0
Sets the inventory_items of this MigrateListingResponse.
def inventory_items(self, inventory_items): self._inventory_items = inventory_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inventory(self, inventory):\n\n self._inventory = inventory", "def inventory_id(self, inventory_id):\n\n self._inventory_id = inventory_id", "def items(self, items: List[InlineResponse200Items]):\n if items is None:\n raise ValueError(\"Invalid value for `items`, must not be...
[ "0.64316744", "0.5630393", "0.56029606", "0.53314924", "0.52890193", "0.52213633", "0.5127491", "0.5053964", "0.50323474", "0.50142586", "0.49879345", "0.4986131", "0.48904306", "0.48881936", "0.48744634", "0.4842486", "0.48069787", "0.466679", "0.4665811", "0.46414807", "0.4...
0.78284794
0
Sets the listing_id of this MigrateListingResponse.
def listing_id(self, listing_id): self._listing_id = listing_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_listing(request, listing_id):\n listing = get_object_or_404(Listing, pk=listing_id)\n\n listing.delete()\n messages.success(\n request,\n 'Your listing has been removed from the database.')\n\n return redirect(reverse('addlisting'))", "def update(self, amz_listing):\n ...
[ "0.54151386", "0.50424653", "0.49489492", "0.49403444", "0.454123", "0.4485277", "0.4483447", "0.44773117", "0.44772324", "0.44687784", "0.4437293", "0.44051337", "0.4390934", "0.4387617", "0.43608093", "0.43608093", "0.4329542", "0.430929", "0.4308864", "0.4278942", "0.42528...
0.8304697
0
Sets the marketplace_id of this MigrateListingResponse.
def marketplace_id(self, marketplace_id): self._marketplace_id = marketplace_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_AWSMarketplaceId(self, value):\n super(ListOrdersInputSet, self)._set_input('AWSMarketplaceId', value)", "def registration_marketplace_id(self, registration_marketplace_id):\n\n self._registration_marketplace_id = registration_marketplace_id", "def listing_id(self, listing_id):\n\n ...
[ "0.68738204", "0.60930204", "0.5767067", "0.5177802", "0.5172527", "0.49900728", "0.49834523", "0.49834523", "0.49834523", "0.49834523", "0.49759004", "0.49217004", "0.4853357", "0.48184267", "0.47949788", "0.47403908", "0.4719058", "0.46994156", "0.46824563", "0.4669242", "0...
0.7607272
0
Sets the status_code of this MigrateListingResponse.
def status_code(self, status_code): self._status_code = status_code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def status_code(self, status_code):\n allowed_values = [1, 100, 101, 102, 103, 104, 105] # noqa: E501\n if self.local_vars_configuration.client_side_validation and status_code not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `status_code` ({0})...
[ "0.62368333", "0.61369103", "0.59862417", "0.5981169", "0.5981169", "0.5981169", "0.5980721", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", "0.5924389", ...
0.6872091
0
Sets the warnings of this MigrateListingResponse.
def warnings(self, warnings): self._warnings = warnings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def warnings(self):\n return self.__warnings", "def allow_warnings(self, allow_warnings):\n self._allow_warnings = allow_warnings", "def setwarnings(self, on):\n # diese Funktion macht eigentlich nichts, ist aber wegen der Kombatibilitaet vorhanden\n print(f\"setwarnings: {on}\")", ...
[ "0.6353443", "0.6246493", "0.61963874", "0.6170872", "0.61554074", "0.60583675", "0.6048729", "0.6048729", "0.5980356", "0.5967424", "0.5896825", "0.58848745", "0.5873911", "0.5829192", "0.5811515", "0.56476164", "0.56447476", "0.56372", "0.562612", "0.5593209", "0.5593209", ...
0.74362904
0
Returns the model properties as a dict
def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(MigrateListingResponse, dict): for key, value in self.items(): result[key] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n f...
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.6900...
0.0
-1
Returns the string representation of the model
def to_str(self): return pprint.pformat(self.to_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n ...
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442...
0.0
-1
For `print` and `pprint`
def __repr__(self): return self.to_str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def print_out():\n pass", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def _printable(self):\n ...
[ "0.75577617", "0.73375154", "0.6986672", "0.698475", "0.6944995", "0.692333", "0.6899106", "0.6898902", "0.68146646", "0.6806209", "0.6753795", "0.67497987", "0.6744008", "0.6700308", "0.6691256", "0.6674591", "0.6658083", "0.66091245", "0.6606931", "0.6601862", "0.6563738", ...
0.0
-1
Returns true if both objects are equal
def __eq__(self, other): if not isinstance(other, MigrateListingResponse): return False return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if i...
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", ...
0.0
-1
Returns true if both objects are not equal
def __ne__(self, other): return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n ...
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
Return a string representation of the data source.
def __repr__(self): cls_name = self.__class__.__name__ conn_name = str(self._connection) tbl_name = self._table return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_source_info(self) -> 'outputs.DatasourceResponse':\n return pulumi.get(self, \"data_source_info\")", "def __repr__(self):\n return f'{self.data.to_string(max_rows = None)}'", "def __str__(self):\n \n result = [\"rows: \" + str(self.rows),\n \"columns: \"+st...
[ "0.68854165", "0.6721115", "0.6685485", "0.6684266", "0.6650188", "0.6645341", "0.66202325", "0.65916854", "0.65053624", "0.65022147", "0.64303386", "0.64116", "0.63948935", "0.6373004", "0.63408643", "0.63251483", "0.6323788", "0.63087815", "0.62860817", "0.62860817", "0.626...
0.6042121
58
Return list of column names.
def columns(self): cursor = self._connection.cursor() cursor.execute('PRAGMA table_info(' + self._table + ')') return [x[1] for x in cursor.fetchall()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getColumnNames(self):\n return self.colnames", "def getColumnsNames(self):\r\n ColsName = []\r\n for i in range(len(self.columns)):\r\n ColsName.append(self.columns[i].getColName())\r\n return ColsName", "def column_names(self):\n return self.data.columns.value...
[ "0.8711412", "0.8577636", "0.8565291", "0.85644644", "0.8475408", "0.83783364", "0.83479637", "0.8311044", "0.8298709", "0.8245695", "0.82290727", "0.81678593", "0.8163773", "0.8153073", "0.8139276", "0.8136085", "0.8114441", "0.80130196", "0.7956286", "0.7935837", "0.7920000...
0.7413343
49
Return iterable of dictionary rows (like csv.DictReader).
def __iter__(self): cursor = self._connection.cursor() cursor.execute('SELECT * FROM ' + self._table) column_names = self.columns() dict_row = lambda x: dict(zip(column_names, x)) return (dict_row(row) for row in cursor.fetchall())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ler(self) -> Iterable[Dict[str, str]]:\n with open(self.arquivo) as f:\n for linha in DictReader(f, dialect=self.DialetoCsv):\n yield linha", "def rows(self):\r\n _rows = []\r\n try:\r\n csv_file = open(self.file_path,'rbU')\r\n csv_rows = ...
[ "0.7213585", "0.7172376", "0.7104608", "0.6873515", "0.6866606", "0.66950583", "0.66675246", "0.66160226", "0.6596041", "0.65936637", "0.6593291", "0.65640014", "0.6563573", "0.654688", "0.6439478", "0.6418613", "0.64027905", "0.6361362", "0.6346992", "0.6337884", "0.63189554...
0.66041005
9
Return iterable of tuples containing distinct columns values.
def distinct(self, columns, **kwds_filter): if not _is_nsiterable(columns): columns = (columns,) self._assert_columns_exist(columns) select_clause = [self._normalize_column(x) for x in columns] select_clause = ', '.join(select_clause) select_clause = 'DISTINCT ' + select_clause cursor = self._execute_query(select_clause, **kwds_filter) return CompareSet(cursor)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unique_cols(self):\n return list(set([coord[1] for coord in self.landscape]))", "def unique_column_values(rows, column_name):\r\n\r\n values = [] #Create an empty list\r\n for row in rows: #Iterate through each row\r\n values.append(row[column_name]) \r\n values = set(val...
[ "0.6917783", "0.67479295", "0.66289616", "0.6525466", "0.64980805", "0.645801", "0.6347434", "0.62989324", "0.6135534", "0.60231894", "0.6020389", "0.59943646", "0.59631413", "0.59360003", "0.5925494", "0.5885717", "0.5880182", "0.5871802", "0.584933", "0.5826537", "0.5789985...
0.5745467
24
Aggregates values using SQL function selecte.g., 'COUNT()', 'SUM(col1)', etc.
def _sql_aggregate(self, sql_function, keys=None, **kwds_filter): # TODO: _sql_aggregate has grown messy after a handful of # iterations look to refactor it in the future to improve # maintainability. if not _is_nsiterable(sql_function): sql_function = (sql_function,) if keys == None: sql_function = ', '.join(sql_function) cursor = self._execute_query(sql_function, **kwds_filter) result = cursor.fetchone() if len(result) == 1: return result[0] return result # <- EXIT! if not _is_nsiterable(keys): keys = (keys,) group_clause = [self._normalize_column(x) for x in keys] group_clause = ', '.join(group_clause) select_clause = '{0}, {1}'.format(group_clause, ', '.join(sql_function)) trailing_clause = 'GROUP BY ' + group_clause cursor = self._execute_query(select_clause, trailing_clause, **kwds_filter) pos = len(sql_function) iterable = ((row[:-pos], getvals(row)) for row in cursor) if pos > 1: # Gets values by slicing (i.e., row[-pos:]). iterable = ((row[:-pos], row[-pos:]) for row in cursor) else: # Gets value by index (i.e., row[-pos]). iterable = ((row[:-pos], row[-pos]) for row in cursor) return CompareDict(iterable, keys)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aggregate_query(self):\n raise NotImplementedError", "def get_select(self):\n if self.is_count_qry is True:\n col = (Col(\"__count\", None), (\"COUNT(*)\", []), None)\n klass_info = {\"model\": self.query.model, \"select_fields\": [\"__count\"]}\n annotations = ...
[ "0.6447261", "0.6286424", "0.6268693", "0.61533105", "0.61249465", "0.6095222", "0.60426706", "0.60377187", "0.59553", "0.5910795", "0.5815274", "0.58027256", "0.57948333", "0.57921916", "0.56742746", "0.5662127", "0.56422573", "0.56114566", "0.55796754", "0.5578842", "0.5568...
0.6102247
5
Execute query and return cursor object.
def _execute_query(self, select_clause, trailing_clause=None, **kwds_filter): try: stmnt, params = self._build_query(self._table, select_clause, **kwds_filter) if trailing_clause: stmnt += '\n' + trailing_clause cursor = self._connection.cursor() cursor.execute('PRAGMA synchronous=OFF') #print(stmnt, params) cursor.execute(stmnt, params) except Exception as e: exc_cls = e.__class__ msg = '%s\n query: %s\n params: %r' % (e, stmnt, params) raise exc_cls(msg) return cursor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cursorExecute(self, query):\n return self.cursor.execute(query)", "def _execute(self, *args):\n cursor = self.db.cursor()\n cursor.execute(*args)\n return cursor", "def execute(cls, sql):\n cursor = cls.get_conn().cursor()\n cursor.execute(sql)\n return curs...
[ "0.81860805", "0.78586006", "0.785281", "0.77011305", "0.7543426", "0.75012374", "0.74967825", "0.7448869", "0.73151606", "0.7281152", "0.7221862", "0.7184796", "0.71583337", "0.7113725", "0.71003574", "0.7088783", "0.70538986", "0.70525205", "0.7023728", "0.70222384", "0.698...
0.6438476
85
Return 'WHERE' clause that implements kwds_filter constraints.
def _build_where_clause(**kwds_filter): clause = [] params = [] items = kwds_filter.items() items = sorted(items, key=lambda x: x[0]) # Ordered by key. for key, val in items: if _is_nsiterable(val): clause.append(key + ' IN (%s)' % (', '.join('?' * len(val)))) for x in val: params.append(x) else: clause.append(key + '=?') params.append(val) clause = ' AND '.join(clause) if clause else '' return clause, params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_where_clause(**kwds_filter):\n clause = []\n params = []\n items = kwds_filter.items()\n items = sorted(items, key=lambda x: x[0]) # Ordered by key.\n for key, val in items:\n if nonstringiter(val):\n clause.append(key + ' IN (%s)' % (', '.jo...
[ "0.70142585", "0.68189335", "0.66024506", "0.6276264", "0.6107378", "0.6103338", "0.6057521", "0.5986232", "0.59493124", "0.5914506", "0.5914506", "0.5914506", "0.58369356", "0.5758533", "0.569798", "0.56843215", "0.56451374", "0.5600097", "0.55459034", "0.5538299", "0.553082...
0.7027632
0
Create an index for specified columnscan speed up testing in some cases.
def create_index(self, *columns): self._assert_columns_exist(columns) # Build index name. whitelist = lambda col: ''.join(x for x in col if x.isalnum()) idx_name = '_'.join(whitelist(col) for col in columns) idx_name = 'idx_{0}_{1}'.format(self._table, idx_name) # Build column names. col_names = [self._normalize_column(x) for x in columns] col_names = ', '.join(col_names) # Prepare statement. statement = 'CREATE INDEX IF NOT EXISTS {0} ON {1} ({2})' statement = statement.format(idx_name, self._table, col_names) # Create index. cursor = self._connection.cursor() cursor.execute('PRAGMA synchronous=OFF') cursor.execute(statement)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_index():", "def build_index():\n pass", "def create_index(self, *columns):\n # Calling super() with older convention to support Python 2.7 & 2.6.\n super(SqliteSource, self).create_index(*columns)", "def create_index(self, *columns):\n # Calling super() with older conventio...
[ "0.6506796", "0.61705136", "0.60441345", "0.60441345", "0.59682924", "0.5770552", "0.5770552", "0.5722764", "0.56989115", "0.5663955", "0.5610542", "0.5545763", "0.5528273", "0.54436374", "0.54176277", "0.54133874", "0.53744704", "0.53413314", "0.52968043", "0.5292923", "0.52...
0.5729641
7
Normalize value for use as SQLite column name.
def _normalize_column(column): if not isinstance(column, str): msg = "expected column of type 'str', got {0!r} instead" raise TypeError(msg.format(column.__class__.__name__)) column = column.strip() column = column.replace('"', '""') # Escape quotes. if column == '': column = '_empty_' return '"' + column + '"'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(self, value):\n return str(value)", "def _normalize_expanded_field(value):\n\n value = value.strip()\n value = re.sub(r'\\s{2,}', ' ', value)\n value = re.sub(r'/{2,}', '/', value)\n value = re.sub(r'\\\\{2,}', '\\\\\\\\', value)\n value = re.sub(r'-{2,}', '-', value)\n val...
[ "0.67484534", "0.6414139", "0.64000803", "0.6394291", "0.62145936", "0.62083966", "0.62030125", "0.6104114", "0.6069231", "0.604914", "0.59973955", "0.5969188", "0.59618396", "0.59541255", "0.5941385", "0.5940318", "0.58399045", "0.5810996", "0.5807061", "0.5782344", "0.57602...
0.6757992
1
Alternate constructor to load an existing collection of records into a tempoarary SQLite database. Loads data (an iterable of lists, tuples, or dicts) into a temporary table
def from_records(cls, data, columns=None): temptable = TemporarySqliteTable(data, columns) return cls(temptable.connection, temptable.name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_records(cls, data, columns=None):\n connection, table = _load_temp_sqlite_table(columns, data)\n return cls(connection, table)", "def load_data(cursor, table, *args, **kwds):\n try:\n records, = args\n columns = None\n except ValueError:\n columns, records = args...
[ "0.6956785", "0.6560394", "0.5986285", "0.597718", "0.5919446", "0.5904428", "0.58205336", "0.57884705", "0.5786599", "0.57413965", "0.5737391", "0.57340336", "0.56946224", "0.5691723", "0.56570345", "0.56243944", "0.56065536", "0.5595917", "0.55688184", "0.5563457", "0.55514...
0.7079159
0
Create an index for specified columnscan speed up testing in some cases. Indexes should be added onebyone to tune a test suite's overall performance. Creating several indexes before testing even begins could lead to worse performance so use them with discretion.
def create_index(self, *columns): # Calling super() with older convention to support Python 2.7 & 2.6. super(SqliteSource, self).create_index(*columns)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_index():", "def build_index():\n pass", "def _create_indices(cls):\r\n from thunderdome.connection import _hosts, _index_all_fields, create_key_index\r\n \r\n if not _hosts: return\r\n for column in cls._columns.values():\r\n if column.index or _index_all_fi...
[ "0.68685216", "0.64368653", "0.6305295", "0.61893433", "0.61893433", "0.61447036", "0.6129321", "0.5909839", "0.5897009", "0.5865872", "0.58181304", "0.57768124", "0.57675445", "0.57356316", "0.5727753", "0.57036835", "0.5691433", "0.5681699", "0.56654525", "0.56597537", "0.5...
0.6015564
8
Handles linear scaling rule and LR decay. Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the provided scaling factor.
def learning_rate_schedule(current_epoch, current_batch, batches_per_epoch, batch_size): del current_batch, batches_per_epoch # not used initial_learning_rate = common.BASE_LEARNING_RATE * batch_size / 128 learning_rate = initial_learning_rate for mult, start_epoch in LR_SCHEDULE: if current_epoch >= start_epoch: learning_rate = initial_learning_rate * mult else: break return learning_rate
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjust_learning_rate_schedule(optimizer, epoch, initial_lr, decay_factor, decay_epochs):\n\n # Find the index of the current interval:\n interval_index = len([mark for mark in decay_epochs if mark < epoch])\n\n lr = initial_lr * (decay_factor ** interval_index)\n for param_group in optimizer.param_...
[ "0.6678285", "0.66325784", "0.6542716", "0.6416433", "0.6363528", "0.63295734", "0.62856907", "0.6262307", "0.62460744", "0.6218091", "0.61931515", "0.6188003", "0.61465865", "0.614502", "0.6144216", "0.6138332", "0.6137244", "0.6130743", "0.6122289", "0.6105983", "0.61007077...
0.65202194
3
Executes before step begins.
def on_batch_begin(self, batch, logs=None): lr = self.schedule(self.epochs, batch, self.steps_per_epoch, self.batch_size) if not isinstance(lr, (float, np.float32, np.float64)): raise ValueError('The output of the "schedule" function should be float.') if lr != self.prev_lr: self.model.optimizer.learning_rate = lr # lr should be a float here self.prev_lr = lr logging.debug( 'Epoch %05d Batch %05d: LearningRateBatchScheduler ' 'change learning rate to %s.', self.epochs, batch, lr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_before(self):\r\n pass", "def run_before(self):\n\n for path in self.hooks.get('before', []):\n self.run_module(path)", "def pre_execute(self):", "def before(self) -> None:\n pass", "def test_before(self):\n\n support.create_project(self, 'candice')\n su...
[ "0.7514745", "0.7484829", "0.71251595", "0.70989174", "0.70584476", "0.70152277", "0.69902265", "0.6825404", "0.6803939", "0.6744725", "0.6697794", "0.66936266", "0.6585134", "0.65798724", "0.6415457", "0.6349697", "0.6306303", "0.6304141", "0.6304141", "0.6304141", "0.630414...
0.0
-1
Run ResNet Cifar10 training and eval loop using native Keras APIs.
def run(flags_obj):
    """Run ResNet Cifar-10 training and eval loop using native Keras APIs.

    Args:
        flags_obj: absl-style flags object carrying all runtime options
            (batch size, epochs, distribution strategy, data dirs, ...).

    Returns:
        The stats dict built by ``common.build_stats`` from the training
        history and final evaluation output.

    Raises:
        ValueError: if fp16 dtype is requested (unsupported here).
    """
    keras_utils.set_session_config(
        enable_xla=flags_obj.enable_xla)

    # Execute flag override logic for better model performance
    if flags_obj.tf_gpu_thread_mode:
        keras_utils.set_gpu_thread_mode_and_count(
            per_gpu_thread_count=flags_obj.per_gpu_thread_count,
            gpu_thread_mode=flags_obj.tf_gpu_thread_mode,
            num_gpus=flags_obj.num_gpus,
            datasets_num_private_threads=flags_obj.datasets_num_private_threads)
    common.set_cudnn_batchnorm_mode()

    dtype = flags_core.get_tf_dtype(flags_obj)
    if dtype == 'fp16':
        raise ValueError('dtype fp16 is not supported in Keras. Use the default '
                         'value(fp32).')

    # Default the image layout to whatever the available hardware prefers.
    data_format = flags_obj.data_format
    if data_format is None:
        data_format = ('channels_first'
                       if tf.config.list_physical_devices('GPU')
                       else 'channels_last')
    tf.keras.backend.set_image_data_format(data_format)

    strategy = distribution_utils.get_distribution_strategy(
        distribution_strategy=flags_obj.distribution_strategy,
        num_gpus=flags_obj.num_gpus,
        all_reduce_alg=flags_obj.all_reduce_alg,
        num_packs=flags_obj.num_packs)

    if strategy:
        # flags_obj.enable_get_next_as_optional controls whether enabling
        # get_next_as_optional behavior in DistributedIterator. If true, last
        # partial batch can be supported.
        strategy.extended.experimental_enable_get_next_as_optional = (
            flags_obj.enable_get_next_as_optional
        )

    strategy_scope = distribution_utils.get_strategy_scope(strategy)

    # Pick the input pipeline: synthetic data (for benchmarking) or real Cifar.
    if flags_obj.use_synthetic_data:
        synthetic_util.set_up_synthetic_data()
        input_fn = common.get_synth_input_fn(
            height=cifar_preprocessing.HEIGHT,
            width=cifar_preprocessing.WIDTH,
            num_channels=cifar_preprocessing.NUM_CHANNELS,
            num_classes=cifar_preprocessing.NUM_CLASSES,
            dtype=flags_core.get_tf_dtype(flags_obj),
            drop_remainder=True)
    else:
        synthetic_util.undo_set_up_synthetic_data()
        input_fn = cifar_preprocessing.input_fn

    train_input_dataset = input_fn(
        is_training=True,
        data_dir=flags_obj.data_dir,
        batch_size=flags_obj.batch_size,
        parse_record_fn=cifar_preprocessing.parse_record,
        datasets_num_private_threads=flags_obj.datasets_num_private_threads,
        dtype=dtype,
        # Setting drop_remainder to avoid the partial batch logic in normalization
        # layer, which triggers tf.where and leads to extra memory copy of input
        # sizes between host and GPU.
        drop_remainder=(not flags_obj.enable_get_next_as_optional))

    eval_input_dataset = None
    if not flags_obj.skip_eval:
        eval_input_dataset = input_fn(
            is_training=False,
            data_dir=flags_obj.data_dir,
            batch_size=flags_obj.batch_size,
            parse_record_fn=cifar_preprocessing.parse_record)
        # Shard eval data across replicas by splitting the data itself.
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
        eval_input_dataset = eval_input_dataset.with_options(options)

    steps_per_epoch = (
        cifar_preprocessing.NUM_IMAGES['train'] // flags_obj.batch_size)

    lr_schedule = 0.1
    if flags_obj.use_tensor_lr:
        # Scale the base rate linearly with batch size (reference size 128) and
        # build a piecewise-constant decay from the (multiplier, epoch) pairs.
        initial_learning_rate = common.BASE_LEARNING_RATE * flags_obj.batch_size / 128
        lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
            boundaries=list(p[1] * steps_per_epoch for p in LR_SCHEDULE),
            values=[initial_learning_rate] + list(
                p[0] * initial_learning_rate for p in LR_SCHEDULE))

    with strategy_scope:
        optimizer = common.get_optimizer(lr_schedule)
        model = resnet_cifar_model.resnet56(classes=cifar_preprocessing.NUM_CLASSES)
        model.compile(
            loss='sparse_categorical_crossentropy',
            optimizer=optimizer,
            metrics=(['sparse_categorical_accuracy']
                     if flags_obj.report_accuracy_metrics else None),
            run_eagerly=flags_obj.run_eagerly)

    train_epochs = flags_obj.train_epochs

    callbacks = common.get_callbacks()

    if not flags_obj.use_tensor_lr:
        # Fall back to the Python-side per-batch scheduler callback.
        lr_callback = LearningRateBatchScheduler(
            schedule=learning_rate_schedule,
            batch_size=flags_obj.batch_size,
            steps_per_epoch=steps_per_epoch)
        callbacks.append(lr_callback)

    # If multiple epochs, ignore the train_steps flag.
    if train_epochs <= 1 and flags_obj.train_steps:
        steps_per_epoch = min(flags_obj.train_steps, steps_per_epoch)
        train_epochs = 1

    num_eval_steps = (cifar_preprocessing.NUM_IMAGES['validation'] //
                      flags_obj.batch_size)

    validation_data = eval_input_dataset
    if flags_obj.skip_eval:
        if flags_obj.set_learning_phase_to_train:
            # TODO(haoyuzhang): Understand slowdown of setting learning phase when
            # not using distribution strategy.
            tf.keras.backend.set_learning_phase(1)
        num_eval_steps = None
        validation_data = None

    if not strategy and flags_obj.explicit_gpu_placement:
        # TODO(b/135607227): Add device scope automatically in Keras training loop
        # when not using distribition strategy.
        no_dist_strat_device = tf.device('/device:GPU:0')
        no_dist_strat_device.__enter__()

    history = model.fit(train_input_dataset,
                        epochs=train_epochs,
                        steps_per_epoch=steps_per_epoch,
                        callbacks=callbacks,
                        validation_steps=num_eval_steps,
                        validation_data=validation_data,
                        validation_freq=flags_obj.epochs_between_evals,
                        verbose=2)

    eval_output = None
    if not flags_obj.skip_eval:
        eval_output = model.evaluate(eval_input_dataset,
                                     steps=num_eval_steps,
                                     verbose=2)

    if not strategy and flags_obj.explicit_gpu_placement:
        no_dist_strat_device.__exit__()

    stats = common.build_stats(history, eval_output, callbacks)
    return stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_keras():\n epochs = 2\n strategy = tf.distribute.MirroredStrategy()\n global_batch_size = strategy.num_replicas_in_sync * 32\n train_dataset = create_dataset(global_batch_size)\n\n with strategy.scope():\n model = ResNet50(input_shape=(224, 224, 3), num_classes=1000)\n model....
[ "0.6698724", "0.6544087", "0.64400065", "0.640361", "0.63593155", "0.63541394", "0.6300235", "0.6192498", "0.61555034", "0.6152484", "0.61209786", "0.60836124", "0.6074417", "0.605095", "0.60388404", "0.6032587", "0.6015973", "0.6003886", "0.59807396", "0.59580404", "0.595420...
0.60113895
17
Parse the auditbeat log file, to generate audit event model and write to the result file(optional)
def parse(self, output=True):
    """Parse the auditbeat log file into a list of audit event models.

    Reads ``self.path_log`` line by line (one JSON event per line), drops
    the keys listed in ``DROPS``, and rebuilds each event as a
    ``BeatState`` wrapping an ``Auditd`` unit.

    :param output: unused here — presumably controls writing a result
        file in a fuller implementation (TODO confirm with callers).
    :return: list of ``BeatState`` objects, or ``None`` when the parser
        type is not ``LogType.audit``.
    :raises KeyError: if a line lacks the ``auditd`` field or the audit
        unit lacks one of the required keys.
    """
    if not self.type == LogType.audit:
        log.error("LogParser doesn't support nonetype yet.")
        return
    stashes = list()
    with open(self.path_log, 'r') as f:
        for line in f.readlines():
            event: Dict = json.loads(line)
            # NOTE(review): `keys` is assigned but never used.
            keys = event.keys()
            # drop irrelevant keys of dict
            for key in DROPS:
                if key in event.keys():
                    event.pop(key)
            # retrieve json info: all three fields are optional in the event
            timestamp, process, file = None, None, None
            if "@timestamp" in event.keys():
                timestamp = event["@timestamp"]
            if "process" in event.keys():
                process = event["process"]
            if "file" in event.keys():
                file = event["file"]
            try:
                audit: Dict = event["auditd"]
            except KeyError:
                raise KeyError(f"line: {line} does not have audit field, parse failed.")
            # reconstruct audit unit; paths/session are optional
            paths, session = None, None
            if "paths" in audit.keys():
                paths = audit["paths"]
            if "session" in audit.keys():
                session = audit["session"]
            try:
                msg_type, result, sequence, data = \
                    audit["message_type"], audit["result"], audit["sequence"], audit["data"]
            except KeyError:
                raise KeyError(f"Audit {audit} does not have certain keys, parse failed.")
            auditd = Auditd(paths, msg_type, sequence, result, data, session)
            beat_state = BeatState(timestamp, process, file, auditd)
            # # TODO: the current code is to add dict format data
            # self.events.append(beat_state)
            stashes.append(beat_state)
    return stashes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_file(self):\n with open(self.file_name, 'r', errors='ignore') as log_file:\n for line in log_file:\n self.process_line(line)", "def __parse(self):\n lines = self.file.readlines()\n name_idx = 2\n name_idx_found = False\n pathre = re.compile(r...
[ "0.61394227", "0.6064906", "0.59986395", "0.5834477", "0.5793847", "0.57524127", "0.5733288", "0.5686557", "0.5646877", "0.55704165", "0.55531", "0.5498819", "0.5413356", "0.53873146", "0.5380587", "0.5363391", "0.5344947", "0.53289014", "0.53170085", "0.5265918", "0.5260259"...
0.63416183
0
Initialise clusters by alternating the bins to which the vectors are assigned.
def alternating_bins_initialisation(self, pixel_data, a=None, b=None):
    """Initialise clusters by dealing vectors round-robin into the K bins.

    Vector ``pixel_data[i]`` (for ``i`` in ``[a, b)``) is assigned to
    cluster ``i % self.K``.

    :param pixel_data: sequence of vectors to distribute.
    :param a: start index (inclusive); defaults to 0.
    :param b: end index (exclusive); defaults to ``len(pixel_data)``.
    :return: ``defaultdict(list)`` mapping cluster index -> list of vectors.
    """
    # Bug fix: the previous `if not a or not b` check reset *both* bounds
    # whenever a == 0 was passed explicitly (0 is falsy).  Treat each
    # bound independently and only default it when it is actually missing.
    if a is None:
        a = 0
    if b is None:
        b = len(pixel_data)
    clusters = defaultdict(list)
    for i in range(a, b):
        clusters[i % self.K].append(pixel_data[i])
    return clusters
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_...
[ "0.73569727", "0.6890101", "0.66579676", "0.65458935", "0.6461489", "0.6289846", "0.62089014", "0.6200618", "0.61897707", "0.60647523", "0.60470694", "0.603594", "0.6009594", "0.6008557", "0.596944", "0.59262115", "0.5919822", "0.58949685", "0.58171034", "0.58091116", "0.5795...
0.700094
1
Setup and calculate codebook vectors
def calculate_cb_vecs(self, clusters):
    """Compute one codebook vector per cluster as the mean of its members.

    :param clusters: mapping cluster index -> list of equal-length vectors;
        keys 0..K-1 are expected to be present and non-empty.
    :return: ``(K, n)`` array of codebook vectors, or ``None`` when the
        input is empty or cluster 0 has no members.
    """
    if not clusters or not clusters[0]:
        return None
    # Dimensionality of the member vectors, taken from the first one.
    dim = len(clusters[0][0])
    cb_vectors = np.zeros((self.K, dim))
    for idx in range(self.K):
        # Accumulate the members, then divide by the cluster size.
        total = np.zeros([dim], dtype=np.uint).reshape(1, dim)
        for member in clusters[idx]:
            total += member
        cb_vectors[idx] = np.divide(total, len(clusters[idx]))
    return cb_vectors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_vectors(self):\n self.localStatistics = []\n self.lastStatistics = []\n self.globalV = []\n self.estimate = []\n self.delta = []\n self.drift = []\n self.slack = [] # only for coordBased model", "def setUp(self):\n self.vencode_obj = iext.GetVen...
[ "0.6225616", "0.6097219", "0.5992996", "0.5844046", "0.58303875", "0.5777067", "0.5741174", "0.5690402", "0.56594807", "0.56027544", "0.560168", "0.55921", "0.5578438", "0.5565369", "0.55545706", "0.55346024", "0.5503242", "0.5489077", "0.54638463", "0.54570204", "0.5454407",...
0.56531245
9
Runs the Kmeans algorithm.
def fix(self, pixel_data):
    """Run the K-means algorithm over ``pixel_data``.

    Iterates assignment/update steps until cluster membership stops
    changing, stores the final codebook vectors on ``self.cb_vectors``
    and returns them.

    :param pixel_data: sequence of equal-length vectors to cluster.
    :return: ``(K, n)`` array of final codebook vectors.
    """
    # :param:`m` is the size of :param:`pixel_data`
    m = len(pixel_data)
    # tempDist stores distance between training points and codebook vectors
    tempDist = np.zeros([self.K]).reshape(self.K, 1)
    # tempCluster stores previous cluster composition
    tempCluster = defaultdict(list)
    # mat will contain the cluster numbers to reassign each vector
    mat = np.zeros([m]).reshape(m, 1)
    # tempMat starts different from mat so the loop runs at least once
    tempMat = np.ones([m]).reshape(m, 1)
    j = 0
    # initialise clusters
    clusters = self.alternating_bins_initialisation(pixel_data)
    # algorithm runs until the sets do not change
    while not np.array_equal(tempMat, mat):
        tempMat = copy.deepcopy(mat)
        # calculate codebook vectors for each cluster
        cb_vectors = self.calculate_cb_vecs(clusters)
        # preserve cluster information
        tempCluster = copy.deepcopy(clusters)
        for key in clusters:  # for each cluster
            for index in range(len(clusters[key])):  # for the length of the cluster
                vector = clusters[key][index]
                for i in range(self.K):
                    # save distances to each codebook vector
                    tempDist[i] = np.c_[np.linalg.norm(vector - cb_vectors[i])]
                # mat[j][0] contains the nearest codebook index of the vector
                # in the jth position in the cluster dictionary
                mat[j][0] = np.c_[np.argmin(tempDist)]
                j += 1
        # reset cluster information
        clusters.clear()
        # reassign training points to clusters according to distance from
        # codebook vectors.
        # Note: new clusters are allocated in order of membership occurrence.
        # j counts down from m as each vector is re-filed, so mat[m - j]
        # walks the assignments in the same order they were recorded above.
        while(j >= 1):
            for k in tempCluster:
                for idx in range(len(tempCluster[k])):
                    clusters[mat[m - j][0]].append(tempCluster[k][idx])
                    j -= 1
    # update the codebook vectors at the end of the loop
    cb_vectors = self.calculate_cb_vecs(clusters)
    self.cb_vectors = copy.deepcopy(cb_vectors)
    return cb_vectors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_algorithm(self):\r\n vectors = self.vectorize_data()\r\n kmeans = KMeans(init='k-means++', n_clusters=self.cluster_amount, n_init=10)\r\n kmeans.fit(vectors)\r\n return self.cluster_tweet(kmeans.labels_)", "def main():\n data = Dummy(n_samples=500, n_dim=3)\n X = data....
[ "0.78048205", "0.77288556", "0.7313263", "0.7294566", "0.7138447", "0.7079959", "0.70205575", "0.7020232", "0.6925354", "0.69126034", "0.6898227", "0.6897526", "0.68272984", "0.6826028", "0.6779355", "0.67769915", "0.6771654", "0.6768111", "0.6721485", "0.6702656", "0.6651811...
0.0
-1
Get the codebook vectors.
def get_cb_vectors(self):
    """Return the codebook vectors produced by the last run of ``fix``."""
    vectors = self.cb_vectors
    return vectors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vectors(self):\n return self.vecs[:]", "def get_vectors(self, corpus_size, vectors_size, vectors_type):\n vectors = np.zeros((corpus_size, vectors_size))\n for i in range(0, corpus_size):\n prefix = vectors_type + '_' + str(i)\n vectors[i] = self.model_dbow.docv...
[ "0.6734293", "0.6306046", "0.6234584", "0.6101457", "0.6060832", "0.5943614", "0.59248465", "0.58844465", "0.58299667", "0.5818226", "0.5769358", "0.5711672", "0.5681364", "0.5677561", "0.5665824", "0.5648895", "0.5648038", "0.5617257", "0.5584856", "0.5566153", "0.55533874",...
0.6465732
1
Extracts features from the final codebook vectors using the L2 norm. The way it works is that we pass in the data as an argument and the function produces len(data) feature vectors such that f(x_i) = [a_1 ... a_K] and a_j = ||x_i - c_j||, where c_j is the j-th codebook vector.
def extract_features(self, data):
    """Map each input vector to K features via the L2 norm.

    For input ``x_i`` the feature vector is ``[a_1 ... a_K]`` with
    ``a_j = ||x_i - c_j||`` where ``c_j`` is the j-th codebook vector.

    :param data: sequence of vectors with the same dimensionality as the
        codebook vectors.
    :return: ``(len(data), K)`` array of distances.
    """
    # NOTE(review): the original TODOs questioned whether this should run on
    # training, validation or test data — still open; confirm with callers.
    features = np.zeros((len(data), self.K))
    for row, sample in enumerate(data):
        for col in range(self.K):
            features[row, col] = np.linalg.norm(sample - self.cb_vectors[col])
    return features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_liwc_features(train_data, test_data):\n print(\"getting liwc features\")\n train_liwc_matrix = []\n test_liwc_matrix = []\n for phrase in train_data:\n liwc_scores = word_category_counter.score_text(phrase)\n feature_vector = []\n for key in liwc_categories:\n if...
[ "0.60243773", "0.59016556", "0.58867896", "0.5756956", "0.573142", "0.56395006", "0.56027967", "0.55864096", "0.55687845", "0.5554088", "0.55537987", "0.54962337", "0.54528916", "0.54197", "0.53998107", "0.5392125", "0.53568494", "0.5354972", "0.5350437", "0.5302239", "0.5282...
0.6925255
0
Make the first block in a blockchain.
def make_genesis_block():
    """Create the first (genesis) block of a blockchain.

    :return: a ``Block`` at index 0 whose previous hash is the sentinel "0".
    """
    genesis = Block(
        index=0,
        timestamp=datetime.now(),
        data="Genesis Block",
        previous_hash="0",
    )
    return genesis
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createFirstBlock(self):\n firstBlock = Block(0, self.__currentTransactionsList, 0, '00')\n self.__chain.append(firstBlock)", "def create_genesis_block(self):\n index = 0\n transactions = []\n timestamp = 0.0\n previous_hash = \"0\"*64\n block = Block(index=ind...
[ "0.82950324", "0.72951174", "0.7135279", "0.7082487", "0.7036506", "0.7036506", "0.7006431", "0.6923667", "0.6863689", "0.6726204", "0.66985613", "0.6642569", "0.6592184", "0.6535945", "0.65345776", "0.6523206", "0.64736736", "0.6410874", "0.639564", "0.63902736", "0.6376658"...
0.7289684
2
Initializing Cin as 0, l2 as an empty list and final as an empty string
def add(num1, num2, Cin):
    """Add two equal-length binary strings with a ripple-carry adder built
    from the gate primitives in module ``g``.

    :param num1: first binary number, as a string of bits (MSB first).
    :param num2: second binary number, same length as ``num1``.
    :param Cin: initial carry-in.  NOTE(review): the original implementation
        immediately overwrote this with 0; that behavior is preserved for
        compatibility, so the parameter is effectively ignored.
    :return: the binary sum as a string of the same length as the inputs
        (the final carry-out is dropped, so overflow is silently truncated).
    """
    Cin = 0  # kept: the supplied carry-in was always discarded
    bits = []  # sum bits, collected least-significant first
    for i in range(len(num1) - 1, -1, -1):
        bit1 = num1[i]
        bit2 = num2[i]
        # sum bit = (bit1 XOR bit2) XOR Cin, built from NAND/OR/AND gates
        s1 = g.XOR(bit1, bit2)
        s2 = g.NAND(s1, Cin)
        s3 = g.OR(s1, Cin)
        total = g.AND(s2, s3)
        # carry-out = (bit1 AND bit2) OR (s1 AND Cin), built via NOR + NOT
        c1 = g.AND(bit1, bit2)
        c2 = g.AND(s1, Cin)
        Cin = g.NOT(g.NOR(c1, c2))
        bits.append(total)
    # Removed: an unused `l3 = l2[::-1]` that was recomputed every iteration
    # (O(n^2) wasted work) and a redundant reverse/re-reverse of the result.
    # Bits were produced LSB-first; reverse them into an MSB-first string.
    return ''.join(str(b) for b in bits[::-1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, o0=(0, 0), o1=(0, 0)):\n self._output = [o0, o1]", "def __init__(self,l=None,c=True):\r\n\t\t\r\n\t\t# default None to zero\r\n\t\tif l is None:\r\n\t\t\tl = 0\r\n\t\t\t\r\n\t\tif l == []:\r\n\t\t\tl = 0\r\n\t\t\r\n\t\t# attempt to translate from string\r\n\t\ttry:\r\n\t\t\tl = Li._tran...
[ "0.6156051", "0.60318625", "0.5904705", "0.5787834", "0.5713939", "0.56481445", "0.56018484", "0.5577135", "0.5547001", "0.55375147", "0.54676926", "0.5461734", "0.54217833", "0.5397691", "0.5375492", "0.5349343", "0.53472084", "0.53472084", "0.53247607", "0.53019875", "0.529...
0.0
-1
NetflowFilters - a model defined in Swagger
def __init__(self, node_b=None, qos_type=None, device_interfaces=None, ports=None, protocol=None, ip_version=None, netflow_devices=None, top=None, app_type=None, nbar_application_names=None, node_a=None, conversation=None, if_names=None, direction=None):  # noqa: E501
    """NetflowFilters - a model defined in Swagger"""  # noqa: E501
    # Field name -> constructor argument; drives both the private-attribute
    # initialisation and the property-setter dispatch below.
    fields = {
        'node_b': node_b,
        'qos_type': qos_type,
        'device_interfaces': device_interfaces,
        'ports': ports,
        'protocol': protocol,
        'ip_version': ip_version,
        'netflow_devices': netflow_devices,
        'top': top,
        'app_type': app_type,
        'nbar_application_names': nbar_application_names,
        'node_a': node_a,
        'conversation': conversation,
        'if_names': if_names,
        'direction': direction,
    }
    # Back every field with a private attribute, initialised to None.
    for name in fields:
        setattr(self, '_' + name, None)
    self.discriminator = None
    # Route each supplied (non-None) argument through its property setter.
    for name, value in fields.items():
        if value is not None:
            setattr(self, name, value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_filters(self):", "def get_timeline_filters(self, req):", "def filter(self, filters):", "def search_model():\n search_condition = request.stream.read()\n try:\n search_condition = json.loads(search_condition if search_condition else \"{}\")\n except Exception:\n raise ParamValue...
[ "0.60868007", "0.5681885", "0.54847294", "0.54048103", "0.52722216", "0.52648157", "0.5253178", "0.525117", "0.5230509", "0.5183991", "0.5142966", "0.5128541", "0.5121736", "0.508809", "0.50717574", "0.5034174", "0.5003462", "0.49971294", "0.49885327", "0.4967403", "0.4960733...
0.0
-1
Sets the node_b of this NetflowFilters.
def node_b(self, node_b):
    """Sets the node_b of this NetflowFilters.

    :param node_b: The node_b of this NetflowFilters.
    """
    self._node_b = node_b
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_node(self, b):\n return b == self.__node_b", "def setB(self, b):\n\t\tself.b = int(b)", "def b(self, b):\n\n self._b = b", "def add_bilink(self, nodeport_a, nodeport_b, bilink):", "def set_bias_for_node(node: Node, value: np.ndarray):\n bias = get_bias_for_node(node)\n if bias is...
[ "0.61645985", "0.6144432", "0.60478383", "0.5791615", "0.576705", "0.5688763", "0.5581127", "0.5452997", "0.5451288", "0.53915596", "0.53915596", "0.5351376", "0.5198766", "0.5175632", "0.5073717", "0.5065744", "0.50484663", "0.50223225", "0.50185555", "0.50114703", "0.500081...
0.8324408
0
Sets the qos_type of this NetflowFilters.
def qos_type(self, qos_type):
    """Sets the qos_type of this NetflowFilters.

    :param qos_type: The qos_type of this NetflowFilters.
    """
    self._qos_type = qos_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def qos(self, qos: int):\n if qos is not None and qos > 2: # noqa: E501\n raise ValueError(\"Invalid value for `qos`, must be a value less than or equal to `2`\") # noqa: E501\n if qos is not None and qos < 0: # noqa: E501\n raise ValueError(\"Invalid value for `qos`, must be...
[ "0.69267035", "0.665194", "0.6260876", "0.6227411", "0.61854005", "0.6163835", "0.61467403", "0.6086317", "0.6053018", "0.60085255", "0.5876559", "0.5848173", "0.56665426", "0.56485814", "0.5635002", "0.559807", "0.53704786", "0.52488047", "0.5220447", "0.5210421", "0.5194985...
0.8767736
0
Sets the device_interfaces of this NetflowFilters.
def device_interfaces(self, device_interfaces):
    """Sets the device_interfaces of this NetflowFilters.

    :param device_interfaces: The device_interfaces of this NetflowFilters.
    """
    self._device_interfaces = device_interfaces
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def network_interfaces(self, network_interfaces):\n\n self._network_interfaces = network_interfaces", "def netflow_devices(self, netflow_devices):\n\n self._netflow_devices = netflow_devices", "def ifaces(self, ifaces):\n \n self._ifaces = ifaces", "def update_interfaces_config(se...
[ "0.68550044", "0.63056403", "0.6121295", "0.6114787", "0.60211456", "0.59780586", "0.5934186", "0.58451796", "0.58451796", "0.57590055", "0.5745199", "0.56535774", "0.54265004", "0.5393894", "0.5360232", "0.53313124", "0.53304845", "0.53277284", "0.5235097", "0.51627266", "0....
0.81078243
0
Sets the ports of this NetflowFilters.
def ports(self, ports):
    """Sets the ports of this NetflowFilters.

    :param ports: The ports of this NetflowFilters.
    """
    self._ports = ports
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_ports(self, ports, **kwargs):\n pass", "def modify_rstp_ports(self, ports, **kwargs):\n pass", "def https_ports(self, https_ports):\n\n self._https_ports = https_ports", "def http_ports(self, http_ports):\n\n self._http_ports = http_ports", "def make_external_ports(se...
[ "0.75998574", "0.6880327", "0.6804348", "0.6704648", "0.65429854", "0.6481354", "0.6428148", "0.61421347", "0.60838145", "0.60838145", "0.60838145", "0.6080848", "0.6070129", "0.60178405", "0.59989095", "0.5934444", "0.59169525", "0.5844672", "0.58260345", "0.5825795", "0.580...
0.8199667
0
Sets the protocol of this NetflowFilters.
def protocol(self, protocol):
    """Sets the protocol of this NetflowFilters.

    :param protocol: The protocol of this NetflowFilters.
    """
    self._protocol = protocol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def switch_protocol(self):\n with self._lock:\n if self.protocol == 'rtmp':\n self._protocol = 'hls'\n else:\n self._protocol = 'rtmp'", "def fill_protocol(self, data):\n self.protocol = get_optional_value(data, self.PROTOCOL, \"http\")\n s...
[ "0.67306274", "0.66096395", "0.65845597", "0.62627715", "0.6239148", "0.6161418", "0.6159605", "0.605314", "0.5974029", "0.59525305", "0.5931918", "0.59305394", "0.58953136", "0.5892481", "0.5892481", "0.5892481", "0.5892481", "0.5892481", "0.5873497", "0.58330894", "0.576807...
0.74954563
3
Sets the ip_version of this NetflowFilters.
def ip_version(self, ip_version):
    """Sets the ip_version of this NetflowFilters.

    :param ip_version: The ip_version of this NetflowFilters.
    """
    self._ip_version = ip_version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vip(self, vip):\n\n self._vip = vip", "def protocol_version(self, protocol_version):\n\n self._protocol_version = protocol_version", "def setVersion(self, version) :\n if version is not None :\n try :\n self.version = [int(p) for p in version.split(\".\")]\n ...
[ "0.63388056", "0.58770186", "0.5694638", "0.5656582", "0.5656582", "0.56357116", "0.55922115", "0.55871147", "0.5562405", "0.55404204", "0.5522308", "0.55216855", "0.5502596", "0.5502596", "0.5502596", "0.5491824", "0.54864305", "0.54864305", "0.54864305", "0.54864305", "0.54...
0.8065355
0
Sets the netflow_devices of this NetflowFilters.
def netflow_devices(self, netflow_devices):
    """Sets the netflow_devices of this NetflowFilters.

    :param netflow_devices: The netflow_devices of this NetflowFilters.
    """
    self._netflow_devices = netflow_devices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def devices(self, devices):\n\n self._devices = devices", "def devices(self, devices):\n\n self._devices = devices", "def set_devices(args):\n global devices\n if args is not None:\n devices = [torch.device(i) for i in ast.literal_eval('[' + args + ']')]\n torch.cuda.set_devic...
[ "0.6428995", "0.6428995", "0.5585992", "0.54547", "0.5365216", "0.5246433", "0.51710474", "0.50898916", "0.50661755", "0.5038719", "0.5004575", "0.49756554", "0.4974928", "0.49667272", "0.4923213", "0.48840016", "0.48089606", "0.48036066", "0.47994307", "0.4792203", "0.479017...
0.86378056
0
Sets the top of this NetflowFilters.
def top(self, top):
    """Sets the top of this NetflowFilters.

    :param top: The top of this NetflowFilters.
    """
    self._top = top
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bb_top(self, bb_top: float):\n\n self._bb_top = bb_top", "def top(self):\n # Sets our Z value to one.\n self.setZValue(1)\n # Set every colliding items Z value to 0\n for sibling in self.collidingItems():\n sibling.setZValue(0)", "def always_top(self, value: bo...
[ "0.67293346", "0.66441184", "0.66229576", "0.6613329", "0.6542814", "0.65164036", "0.6361207", "0.6278201", "0.62754864", "0.620966", "0.60119075", "0.60119075", "0.60119075", "0.59842026", "0.59842026", "0.5849825", "0.58208567", "0.5782378", "0.5741379", "0.5739408", "0.572...
0.73888093
0
Sets the app_type of this NetflowFilters.
def app_type(self, app_type):
    """Sets the app_type of this NetflowFilters.

    :param app_type: The app_type of this NetflowFilters.
    """
    self._app_type = app_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _app_type(self):\n return self._event['app_type']", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self.type = type", "def setFilter(self, type: int, filter: int) -> None:\n ...", "def set_type(self, type):\n self._type = type", "def se...
[ "0.6110402", "0.6007707", "0.6007707", "0.5881547", "0.5826859", "0.5705285", "0.5672125", "0.5645346", "0.5516108", "0.5502036", "0.5485557", "0.54566836", "0.5454073", "0.5433174", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.54322046", "0.543220...
0.8083322
0
Sets the nbar_application_names of this NetflowFilters.
def nbar_application_names(self, nbar_application_names):
    """Sets the nbar_application_names of this NetflowFilters.

    :param nbar_application_names: The nbar_application_names of this NetflowFilters.
    """
    self._nbar_application_names = nbar_application_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setNameFilters(self, filters):\n if self._completer:\n self._completer.model().setNameFilters(filters)", "def set_name(self, application_name):\r\n self._name = application_name", "def app_names(self):\n return self.get_app_names()", "def app_name(self, value):\n self._...
[ "0.5579578", "0.5503502", "0.51913893", "0.5184525", "0.51769996", "0.5006621", "0.4993549", "0.48435128", "0.48244205", "0.4778834", "0.4738988", "0.47321886", "0.47280967", "0.47159183", "0.4684673", "0.46767935", "0.46593088", "0.46463352", "0.4644338", "0.46052152", "0.46...
0.86759675
0
Sets the node_a of this NetflowFilters.
def node_a(self, node_a):
    """Sets the node_a of this NetflowFilters.

    :param node_a: The node_a of this NetflowFilters.
    """
    self._node_a = node_a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_node(self, node):\n self.__node = node", "def from_node(self, a):\n return a == self.__node_a", "def nodes(self, nodes_array):\n self.nodes_set = nodes_array", "def set_node(self, name, state):\n self.source_net.nodes[name] = state", "def __call__(self, node_A):\n ...
[ "0.58251303", "0.5789575", "0.5745441", "0.5615295", "0.557767", "0.5573292", "0.5538027", "0.55000454", "0.5496195", "0.54542196", "0.5382919", "0.5307776", "0.51498705", "0.5089265", "0.507674", "0.5060496", "0.5041549", "0.50394356", "0.4960041", "0.49577066", "0.4915388",...
0.79782516
0
Sets the conversation of this NetflowFilters.
def conversation(self, conversation):
    """Sets the conversation of this NetflowFilters.

    :param conversation: The conversation of this NetflowFilters.
    """
    self._conversation = conversation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_conversation(self, conversation):\r\n self.conversation = conversation", "def set_gift_conversation(self, conversation_string):\r\n self.gift_conversation = conversation_string", "def update(self, conversation):\n self.content_type = \"application/json\"\n self.method = \"PA...
[ "0.7668855", "0.6350055", "0.52989596", "0.5078657", "0.50487155", "0.50432694", "0.49181578", "0.49066126", "0.4869592", "0.48333606", "0.4763982", "0.47448006", "0.47074327", "0.46813306", "0.46577984", "0.45800743", "0.4569655", "0.45474747", "0.45351785", "0.44976678", "0...
0.72746265
1
Sets the if_names of this NetflowFilters.
def if_names(self, if_names):
    """Sets the if_names of this NetflowFilters.

    :param if_names: The if_names of this NetflowFilters.
    """
    self._if_names = if_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ifaces_init(*ifnames):\n for ifname in ifnames:\n _set_eth_admin_state(ifname, schema.InterfaceState.ABSENT)", "def setNameFilters(self, filters):\n if self._completer:\n self._completer.model().setNameFilters(filters)", "def setFilters(self, filters):\n self.__filters = ...
[ "0.5829484", "0.5813386", "0.5766594", "0.5461771", "0.5420651", "0.53666186", "0.5335555", "0.5328612", "0.5256119", "0.5160971", "0.5098839", "0.50947213", "0.5075196", "0.5055912", "0.5040678", "0.4927203", "0.49013257", "0.48951134", "0.48855725", "0.48845008", "0.4844863...
0.8094472
0
Sets the direction of this NetflowFilters.
def direction(self, direction):
    """Sets the direction of this NetflowFilters.

    :param direction: The direction of this NetflowFilters.
    """
    self._direction = direction
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_direction(self, direction: str) -> None:\n self.wink.set_fan_direction(direction)", "def set_direction(self, new_dir):\n self.__direction = new_dir", "def setDirection(self,stepDir = 2):\n pass", "def setdirection(self, *args, **kwargs):\n return _coordsys.coordsys_setdire...
[ "0.7139598", "0.7040138", "0.7006834", "0.69987583", "0.6970682", "0.68825686", "0.6802282", "0.6730239", "0.63998103", "0.6399511", "0.6314273", "0.6307516", "0.63062197", "0.63062197", "0.62925655", "0.62565696", "0.6163797", "0.6163797", "0.60774386", "0.60496444", "0.6030...
0.7080141
1
Returns the model properties as a dict
def to_dict(self):
    """Returns the model properties as a dict.

    Walks ``self.swagger_types`` and recursively converts any nested
    model (anything exposing ``to_dict``) found in attributes, lists or
    dict values.
    """
    result = {}
    for attr, _ in six.iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            # Convert each list element that is itself a model.
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            # Convert model values inside dicts, keeping the keys.
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    # Swagger codegen boilerplate: merge in dict items when the model
    # subclasses dict (it does not here, so this branch is normally dead).
    if issubclass(NetflowFilters, dict):
        for key, value in self.items():
            result[key] = value
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n f...
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.6900...
0.0
-1
Returns the string representation of the model
def to_str(self): return pprint.pformat(self.to_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n ...
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442...
0.0
-1
For `print` and `pprint`
def __repr__(self): return self.to_str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def print_out():\n pass", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def _printable(self):\n ...
[ "0.75577617", "0.73375154", "0.6986672", "0.698475", "0.6944995", "0.692333", "0.6899106", "0.6898902", "0.68146646", "0.6806209", "0.6753795", "0.67497987", "0.6744008", "0.6700308", "0.6691256", "0.6674591", "0.6658083", "0.66091245", "0.6606931", "0.6601862", "0.6563738", ...
0.0
-1
Returns true if both objects are equal
def __eq__(self, other): if not isinstance(other, NetflowFilters): return False return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if i...
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", ...
0.0
-1
Returns true if both objects are not equal
def __ne__(self, other): return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n ...
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
Get a list of all Accounts authorized for the provided token. Get a list of Transactions pages that satisfy a timebased Transaction query.
def get_transactions(self, account_id, from_date=None, to_date=None, page_size=None, type_list=None): endpoint = 'accounts/{0}/transactions'.format(account_id) params = {} if from_date: params["from"] = from_date if to_date: params["to"] = to_date if page_size: params["pageSize"] = page_size if type_list: type_list = "%2C".join(type_list) params["type"] = type_list return self._api.request(endpoint, params=params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})", "def list_accounts(self):\n pass", "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n ...
[ "0.6856845", "0.6346517", "0.6344958", "0.6305827", "0.62168396", "0.61625844", "0.61552113", "0.6129915", "0.6112865", "0.60796094", "0.60191786", "0.5938606", "0.59356767", "0.58972865", "0.58753717", "0.58455265", "0.5837338", "0.5836202", "0.5822045", "0.5800987", "0.5792...
0.60980934
9
Get a list of all Accounts authorized for the provided token. Get the details of a single Account Transaction.
def get_transition_details(self, account_id, transaction_id): endpoint = 'accounts/{0}/transactions{1}'.format(account_id, transaction_id) return self._api.request(endpoint)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})", "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n ...
[ "0.6953453", "0.67745745", "0.67192453", "0.6699322", "0.665223", "0.66492224", "0.64944196", "0.64375126", "0.6423205", "0.6415874", "0.64149076", "0.63608265", "0.635944", "0.63580346", "0.63404256", "0.6307913", "0.6304804", "0.62903816", "0.6289546", "0.62893355", "0.6263...
0.0
-1
Get a list of all Accounts authorized for the provided token. Get a range of Transactions for an Account based on the Transaction IDs.
def get_transaction_list(self, account_id, from_date, to_date, type_list=None): endpoint = 'accounts/{0}/transactions/idrange'.format(account_id) params = {} params["from"] = from_date params["to"] = to_date if type_list: type_list = "%2C".join(type_list) params["type"] = type_list return self._api.request(endpoint, params=params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})", "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n ...
[ "0.68266255", "0.6736031", "0.65336376", "0.6370594", "0.63402724", "0.6323471", "0.62967646", "0.6295912", "0.62643474", "0.6197194", "0.6178552", "0.6152341", "0.60816187", "0.6077081", "0.60660774", "0.6049993", "0.60333514", "0.6030597", "0.6026027", "0.60059094", "0.5975...
0.66186446
2
Get a list of all Accounts authorized for the provided token. Get a range of Transactions for an Account starting at (but not including) a provided Transaction ID.
def get_transaction_list2(self, account_id, aid): endpoint = 'accounts/{0}/transactions/sinceid'.format(account_id) params = {} params["id"] = aid return self._api.request(endpoint, params=params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})", "def get_transaction_list(self, account_id, from_date, to_date,\n type_list=None):\n endpoint = 'accounts/{0}/transactions/idrange'.format(account_id)\n\n params = {}\n\n par...
[ "0.6701486", "0.6636998", "0.6507858", "0.64012176", "0.63822055", "0.6338222", "0.6296368", "0.6234683", "0.6104485", "0.6088276", "0.6046946", "0.6040939", "0.60122126", "0.6005696", "0.5965876", "0.59531087", "0.59082997", "0.5885692", "0.5884602", "0.5837892", "0.58305675...
0.5752467
26
To add parents to database
def add_parent(session, df): try: for _, row in df.iterrows(): parent = Parent() parent.name = row['parent_name'] parent.family = row['family'] session.add(parent) except Exception as ex: session.rollback() raise ex else: session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_append_children_category(self):\n category = Category(catname='olympic games')\n category1 = Category(catname='Tennis')\n category.parents.append(category1)\n category.save()\n assert category.parents", "def add_parent(sender, instance, **kwargs):\n if not kwargs['c...
[ "0.6654415", "0.65198356", "0.6462206", "0.64579284", "0.6353594", "0.62729967", "0.6186695", "0.61660165", "0.6154463", "0.61290205", "0.61266243", "0.6095059", "0.6068027", "0.6065559", "0.6063551", "0.6041484", "0.6020467", "0.60039234", "0.60022867", "0.59916437", "0.5974...
0.6953736
0
To add child to database
def add_child(session, df): try: for _, row in df.iterrows(): child = Child() child.name = row['child_name'] child.residence = row['Residence'] father_obj = session.query(Parent).filter_by(name=row['father_name']).first() child.parents.append(father_obj) mother_obj = session.query(Parent).filter_by(name=row['mother_name']).first() child.parents.append(mother_obj) session.add(child) except Exception as ex: session.rollback() raise ex else: session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_child(self, child):\n name = name_displayer.display(child)\n birth = get_birth_or_fallback(self.dbstate.db, child)\n birth_date, birth_sort, birth_place = self.get_date_place(birth)\n death = get_death_or_fallback(self.dbstate.db, child)\n death_date, death_sort, death_pl...
[ "0.7669851", "0.716879", "0.7081737", "0.7025343", "0.6958452", "0.69013804", "0.68937933", "0.6841492", "0.68182874", "0.6781795", "0.6781795", "0.6744913", "0.6703005", "0.65205777", "0.65191615", "0.65191615", "0.6515472", "0.64840895", "0.6477812", "0.6470086", "0.6470086...
0.69629735
4
Convenience redirect to find the root outcome group for a particular context. Will redirect to the appropriate outcome group's URL.
def redirect_to_root_outcome_group_for_context_global(request_ctx, **request_kwargs): path = '/v1/global/root_outcome_group' url = request_ctx.base_api_url + path.format() response = client.get(request_ctx, url, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect_to_root_outcome_group_for_context_accounts(request_ctx, account_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/root_outcome_group'\n url = request_ctx.base_api_url + path.format(account_id=account_id)\n response = client.get(request_ctx, url, **request_kwargs)\n\n return resp...
[ "0.7103075", "0.67741513", "0.56865895", "0.51907563", "0.5116719", "0.5102657", "0.5087065", "0.5047118", "0.4916807", "0.49087209", "0.48846796", "0.47762623", "0.47364914", "0.4677289", "0.46656385", "0.46637428", "0.46563548", "0.46534562", "0.45972314", "0.4521781", "0.4...
0.76904535
0
Convenience redirect to find the root outcome group for a particular context. Will redirect to the appropriate outcome group's URL.
def redirect_to_root_outcome_group_for_context_accounts(request_ctx, account_id, **request_kwargs): path = '/v1/accounts/{account_id}/root_outcome_group' url = request_ctx.base_api_url + path.format(account_id=account_id) response = client.get(request_ctx, url, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect_to_root_outcome_group_for_context_global(request_ctx, **request_kwargs):\n\n path = '/v1/global/root_outcome_group'\n url = request_ctx.base_api_url + path.format()\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def redirect_to_root_outcome_group_for_c...
[ "0.76904535", "0.67741513", "0.56865895", "0.51907563", "0.5116719", "0.5102657", "0.5087065", "0.5047118", "0.4916807", "0.49087209", "0.48846796", "0.47762623", "0.47364914", "0.4677289", "0.46656385", "0.46637428", "0.46563548", "0.46534562", "0.45972314", "0.4521781", "0....
0.7103075
1
Convenience redirect to find the root outcome group for a particular context. Will redirect to the appropriate outcome group's URL.
def redirect_to_root_outcome_group_for_context_courses(request_ctx, course_id, **request_kwargs): path = '/v1/courses/{course_id}/root_outcome_group' url = request_ctx.base_api_url + path.format(course_id=course_id) response = client.get(request_ctx, url, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect_to_root_outcome_group_for_context_global(request_ctx, **request_kwargs):\n\n path = '/v1/global/root_outcome_group'\n url = request_ctx.base_api_url + path.format()\n response = client.get(request_ctx, url, **request_kwargs)\n\n return response", "def redirect_to_root_outcome_group_for_c...
[ "0.76904535", "0.7103075", "0.56865895", "0.51907563", "0.5116719", "0.5102657", "0.5087065", "0.5047118", "0.4916807", "0.49087209", "0.48846796", "0.47762623", "0.47364914", "0.4677289", "0.46656385", "0.46637428", "0.46563548", "0.46534562", "0.45972314", "0.4521781", "0.4...
0.67741513
2
Modify an existing outcome group. Fields not provided are left as is; unrecognized fields are ignored. When changing the parent outcome group, the new parent group must belong to the same context as this outcome group, and must not be a descendant of this outcome group (i.e. no cycles allowed).
def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs): path = '/v1/global/outcome_groups/{id}' payload = { 'title' : title, 'description' : description, 'vendor_guid' : vendor_guid, 'parent_outcome_group_id' : parent_outcome_group_id, } url = request_ctx.base_api_url + path.format(id=id) response = client.put(request_ctx, url, payload=payload, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 've...
[ "0.61605686", "0.61406356", "0.5958074", "0.59414357", "0.5797429", "0.5668177", "0.5624319", "0.5621426", "0.561763", "0.5572149", "0.5539592", "0.55051327", "0.54824334", "0.5461233", "0.54360193", "0.5433018", "0.53988296", "0.5368346", "0.53458875", "0.52537507", "0.52454...
0.67553115
0
Modify an existing outcome group. Fields not provided are left as is; unrecognized fields are ignored. When changing the parent outcome group, the new parent group must belong to the same context as this outcome group, and must not be a descendant of this outcome group (i.e. no cycles allowed).
def update_outcome_group_accounts(request_ctx, account_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs): path = '/v1/accounts/{account_id}/outcome_groups/{id}' payload = { 'title' : title, 'description' : description, 'vendor_guid' : vendor_guid, 'parent_outcome_group_id' : parent_outcome_group_id, } url = request_ctx.base_api_url + path.format(account_id=account_id, id=id) response = client.put(request_ctx, url, payload=payload, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n ...
[ "0.67553115", "0.61406356", "0.5958074", "0.59414357", "0.5797429", "0.5668177", "0.5624319", "0.5621426", "0.561763", "0.5572149", "0.5539592", "0.55051327", "0.54824334", "0.5461233", "0.54360193", "0.5433018", "0.53988296", "0.5368346", "0.53458875", "0.52537507", "0.52454...
0.61605686
1
Modify an existing outcome group. Fields not provided are left as is; unrecognized fields are ignored. When changing the parent outcome group, the new parent group must belong to the same context as this outcome group, and must not be a descendant of this outcome group (i.e. no cycles allowed).
def update_outcome_group_courses(request_ctx, course_id, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs): path = '/v1/courses/{course_id}/outcome_groups/{id}' payload = { 'title' : title, 'description' : description, 'vendor_guid' : vendor_guid, 'parent_outcome_group_id' : parent_outcome_group_id, } url = request_ctx.base_api_url + path.format(course_id=course_id, id=id) response = client.put(request_ctx, url, payload=payload, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_outcome_group_global(request_ctx, id, title=None, description=None, vendor_guid=None, parent_outcome_group_id=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n ...
[ "0.67553115", "0.61605686", "0.5958074", "0.59414357", "0.5797429", "0.5668177", "0.5624319", "0.5621426", "0.561763", "0.5572149", "0.5539592", "0.55051327", "0.54824334", "0.5461233", "0.54360193", "0.5433018", "0.53988296", "0.5368346", "0.53458875", "0.52537507", "0.52454...
0.61406356
2
Deleting an outcome group deletes descendant outcome groups and outcome links. The linked outcomes themselves are only deleted if all links to the outcome were deleted. Aligned outcomes cannot be deleted; as such, if all remaining links to an aligned outcome are included in this group's descendants, the group deletion will fail.
def delete_outcome_group_global(request_ctx, id, **request_kwargs): path = '/v1/global/outcome_groups/{id}' url = request_ctx.base_api_url + path.format(id=id) response = client.delete(request_ctx, url, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_outcome_group_accounts(request_ctx, account_id, id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", ...
[ "0.66492325", "0.6541948", "0.6461525", "0.6348538", "0.633461", "0.6283196", "0.6217388", "0.6203314", "0.6168106", "0.61588556", "0.6150082", "0.61439496", "0.6109929", "0.61078966", "0.6060277", "0.60184395", "0.6001307", "0.6001307", "0.59507966", "0.5928808", "0.5904541"...
0.662239
1
Deleting an outcome group deletes descendant outcome groups and outcome links. The linked outcomes themselves are only deleted if all links to the outcome were deleted. Aligned outcomes cannot be deleted; as such, if all remaining links to an aligned outcome are included in this group's descendants, the group deletion will fail.
def delete_outcome_group_accounts(request_ctx, account_id, id, **request_kwargs): path = '/v1/accounts/{account_id}/outcome_groups/{id}' url = request_ctx.base_api_url + path.format(account_id=account_id, id=id) response = client.delete(request_ctx, url, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_outcome_group_global(request_ctx, id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_080_group_delete(self):\n\n testf...
[ "0.662239", "0.6541948", "0.6461525", "0.6348538", "0.633461", "0.6283196", "0.6217388", "0.6203314", "0.6168106", "0.61588556", "0.6150082", "0.61439496", "0.6109929", "0.61078966", "0.6060277", "0.60184395", "0.6001307", "0.6001307", "0.59507966", "0.5928808", "0.5904541", ...
0.66492325
0
Deleting an outcome group deletes descendant outcome groups and outcome links. The linked outcomes themselves are only deleted if all links to the outcome were deleted. Aligned outcomes cannot be deleted; as such, if all remaining links to an aligned outcome are included in this group's descendants, the group deletion will fail.
def delete_outcome_group_courses(request_ctx, course_id, id, **request_kwargs): path = '/v1/courses/{course_id}/outcome_groups/{id}' url = request_ctx.base_api_url + path.format(course_id=course_id, id=id) response = client.delete(request_ctx, url, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_outcome_group_accounts(request_ctx, account_id, id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", ...
[ "0.66492325", "0.662239", "0.6541948", "0.6461525", "0.6348538", "0.6283196", "0.6217388", "0.6203314", "0.6168106", "0.61588556", "0.6150082", "0.61439496", "0.6109929", "0.61078966", "0.6060277", "0.60184395", "0.6001307", "0.6001307", "0.59507966", "0.5928808", "0.5904541"...
0.633461
5
List the immediate OutcomeLink children of the outcome group. Paginated.
def list_linked_outcomes_global(request_ctx, id, per_page=None, **request_kwargs): if per_page is None: per_page = request_ctx.per_page path = '/v1/global/outcome_groups/{id}/outcomes' payload = { 'per_page' : per_page, } url = request_ctx.base_api_url + path.format(id=id) response = client.get(request_ctx, url, payload=payload, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def children(self): # noqa: ANN201", "def get_children(self):\n\n pass", "def nav_children(self):\r\n return list(self.get_children().filter(show_in_nav=True))", "def get_children(self):\r\n return self.children", "def GetChildren(self, *args, **kwargs):\n pass", "def get_chi...
[ "0.62007177", "0.60129935", "0.59958863", "0.58787924", "0.5869366", "0.58592945", "0.5843929", "0.57969826", "0.5790005", "0.5790005", "0.5790005", "0.5784237", "0.5758441", "0.5725369", "0.5718923", "0.5705861", "0.56908256", "0.56826895", "0.56826895", "0.56807274", "0.567...
0.5629413
25
List the immediate OutcomeLink children of the outcome group. Paginated.
def list_linked_outcomes_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs): if per_page is None: per_page = request_ctx.per_page path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes' payload = { 'per_page' : per_page, } url = request_ctx.base_api_url + path.format(account_id=account_id, id=id) response = client.get(request_ctx, url, payload=payload, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def children(self): # noqa: ANN201", "def get_children(self):\n\n pass", "def nav_children(self):\r\n return list(self.get_children().filter(show_in_nav=True))", "def get_children(self):\r\n return self.children", "def GetChildren(self, *args, **kwargs):\n pass", "def get_chi...
[ "0.62007177", "0.60129935", "0.59958863", "0.58787924", "0.5869366", "0.58592945", "0.5843929", "0.57969826", "0.5790005", "0.5790005", "0.5790005", "0.5784237", "0.5758441", "0.5725369", "0.5718923", "0.5705861", "0.56908256", "0.56826895", "0.56826895", "0.56807274", "0.567...
0.525717
59
List the immediate OutcomeLink children of the outcome group. Paginated.
def list_linked_outcomes_courses(request_ctx, course_id, id, per_page=None, **request_kwargs): if per_page is None: per_page = request_ctx.per_page path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes' payload = { 'per_page' : per_page, } url = request_ctx.base_api_url + path.format(course_id=course_id, id=id) response = client.get(request_ctx, url, payload=payload, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def children(self): # noqa: ANN201", "def get_children(self):\n\n pass", "def nav_children(self):\r\n return list(self.get_children().filter(show_in_nav=True))", "def get_children(self):\r\n return self.children", "def GetChildren(self, *args, **kwargs):\n pass", "def get_chi...
[ "0.62007177", "0.60129935", "0.59958863", "0.58787924", "0.5869366", "0.58592945", "0.5843929", "0.57969826", "0.5790005", "0.5790005", "0.5790005", "0.5784237", "0.5758441", "0.5725369", "0.5718923", "0.5705861", "0.56908256", "0.56826895", "0.56826895", "0.56807274", "0.567...
0.5057887
93
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored. If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group. If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs): path = '/v1/global/outcome_groups/{id}/outcomes' payload = { 'outcome_id' : outcome_id, 'title' : title, 'display_name' : display_name, 'description' : description, 'vendor_guid' : vendor_guid, 'mastery_points' : mastery_points, 'ratings[description]' : ratings_description, 'ratings[points]' : ratings_points, } url = request_ctx.base_api_url + path.format(id=id) response = client.post(request_ctx, url, payload=payload, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n payload = {...
[ "0.717459", "0.7163847", "0.7061997", "0.6714336", "0.66986096", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.42...
0.7362988
0
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored. If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group. If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
def create_link_outcome_global_outcome_id(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs): path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}' payload = { 'title' : title, 'display_name' : display_name, 'description' : description, 'vendor_guid' : vendor_guid, 'mastery_points' : mastery_points, 'ratings[description]' : ratings_description, 'ratings[points]' : ratings_points, } url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id) response = client.put(request_ctx, url, payload=payload, **request_kwargs) return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' :...
[ "0.7362988", "0.7163847", "0.7061997", "0.6714336", "0.66986096", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.4...
0.717459
1
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored. If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group. If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
def create_link_outcome_accounts(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):
    """POST an outcome link into an account-level outcome group.

    Either an existing ``outcome_id`` or the fields of a new outcome are
    supplied through the payload.
    """
    path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes'
    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)
    payload = {
        'outcome_id': outcome_id,
        'title': title,
        'display_name': display_name,
        'description': description,
        'vendor_guid': vendor_guid,
        'mastery_points': mastery_points,
        'ratings[description]': ratings_description,
        'ratings[points]': ratings_points,
    }
    return client.post(request_ctx, url, payload=payload, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' :...
[ "0.7362988", "0.717459", "0.7163847", "0.7061997", "0.66986096", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.42...
0.6714336
4
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored. If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group. If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
def create_link_outcome_accounts_outcome_id(request_ctx, account_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):
    """PUT a specific outcome link into an account-level outcome group.

    The linked outcome is identified by ``outcome_id`` in the URL.
    """
    path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'
    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)
    payload = {
        'title': title,
        'display_name': display_name,
        'description': description,
        'vendor_guid': vendor_guid,
        'mastery_points': mastery_points,
        'ratings[description]': ratings_description,
        'ratings[points]': ratings_points,
    }
    return client.put(request_ctx, url, payload=payload, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' :...
[ "0.7362988", "0.717459", "0.7061997", "0.6714336", "0.66986096", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.42...
0.7163847
2
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored. If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group. If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
def create_link_outcome_courses(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):
    """POST an outcome link into a course-level outcome group.

    Either an existing ``outcome_id`` or the fields of a new outcome are
    supplied through the payload.
    """
    path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes'
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
    payload = {
        'outcome_id': outcome_id,
        'title': title,
        'display_name': display_name,
        'description': description,
        'vendor_guid': vendor_guid,
        'mastery_points': mastery_points,
        'ratings[description]': ratings_description,
        'ratings[points]': ratings_points,
    }
    return client.post(request_ctx, url, payload=payload, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' :...
[ "0.7362988", "0.717459", "0.7163847", "0.7061997", "0.6714336", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.427...
0.66986096
5
Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored. If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group. If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
def create_link_outcome_courses_outcome_id(request_ctx, course_id, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):
    """PUT a specific outcome link into a course-level outcome group.

    The linked outcome is identified by ``outcome_id`` in the URL.
    """
    path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)
    payload = {
        'title': title,
        'display_name': display_name,
        'description': description,
        'vendor_guid': vendor_guid,
        'mastery_points': mastery_points,
        'ratings[description]': ratings_description,
        'ratings[points]': ratings_points,
    }
    return client.put(request_ctx, url, payload=payload, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_link_outcome_global(request_ctx, id, outcome_id=None, title=None, display_name=None, description=None, vendor_guid=None, mastery_points=None, ratings_description=None, ratings_points=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes'\n payload = {\n 'outcome_id' :...
[ "0.7362988", "0.717459", "0.7163847", "0.6714336", "0.66986096", "0.5106874", "0.4798102", "0.47242922", "0.45309213", "0.44986874", "0.44323424", "0.44262272", "0.44031692", "0.4358714", "0.43203336", "0.43203336", "0.43050796", "0.43030095", "0.4294811", "0.42858493", "0.42...
0.7061997
3
Unlinking an outcome only deletes the outcome itself if this was the last link to the outcome in any group in any context. Aligned outcomes cannot be deleted; as such, if this is the last link to an aligned outcome, the unlinking will fail.
def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):
    """DELETE an outcome link from the global outcome group."""
    path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'
    url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)
    return client.delete(request_ctx, url, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unlink(self, link_id):", "def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)\n res...
[ "0.6474747", "0.63120115", "0.6281923", "0.61563367", "0.61477256", "0.6030046", "0.58153516", "0.56244844", "0.5609927", "0.5545071", "0.55448717", "0.54989296", "0.5490254", "0.5487666", "0.5484662", "0.54513216", "0.5432963", "0.54169786", "0.53966224", "0.5357432", "0.535...
0.6547148
0
Unlinking an outcome only deletes the outcome itself if this was the last link to the outcome in any group in any context. Aligned outcomes cannot be deleted; as such, if this is the last link to an aligned outcome, the unlinking will fail.
def unlink_outcome_accounts(request_ctx, account_id, id, outcome_id, **request_kwargs):
    """DELETE an outcome link from an account-level outcome group."""
    path = '/v1/accounts/{account_id}/outcome_groups/{id}/outcomes/{outcome_id}'
    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id, outcome_id=outcome_id)
    return client.delete(request_ctx, url, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", ...
[ "0.6547148", "0.6474747", "0.6281923", "0.61563367", "0.61477256", "0.6030046", "0.58153516", "0.56244844", "0.5609927", "0.5545071", "0.55448717", "0.54989296", "0.5490254", "0.5487666", "0.5484662", "0.54513216", "0.5432963", "0.54169786", "0.53966224", "0.5357432", "0.5355...
0.63120115
2
Unlinking an outcome only deletes the outcome itself if this was the last link to the outcome in any group in any context. Aligned outcomes cannot be deleted; as such, if this is the last link to an aligned outcome, the unlinking will fail.
def unlink_outcome_courses(request_ctx, course_id, id, outcome_id, **request_kwargs):
    """DELETE an outcome link from a course-level outcome group."""
    path = '/v1/courses/{course_id}/outcome_groups/{id}/outcomes/{outcome_id}'
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id, outcome_id=outcome_id)
    return client.delete(request_ctx, url, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unlink_outcome_global(request_ctx, id, outcome_id, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/outcomes/{outcome_id}'\n url = request_ctx.base_api_url + path.format(id=id, outcome_id=outcome_id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", ...
[ "0.6547148", "0.6474747", "0.63120115", "0.61563367", "0.61477256", "0.6030046", "0.58153516", "0.56244844", "0.5609927", "0.5545071", "0.55448717", "0.54989296", "0.5490254", "0.5487666", "0.5484662", "0.54513216", "0.5432963", "0.54169786", "0.53966224", "0.5357432", "0.535...
0.6281923
3
List the immediate OutcomeGroup children of the outcome group. Paginated.
def list_subgroups_global(request_ctx, id, per_page=None, **request_kwargs):
    """GET the immediate subgroups of the global outcome group (paginated).

    Falls back to ``request_ctx.per_page`` when ``per_page`` is not given.
    """
    path = '/v1/global/outcome_groups/{id}/subgroups'
    url = request_ctx.base_api_url + path.format(id=id)
    payload = {
        'per_page': request_ctx.per_page if per_page is None else per_page,
    }
    return client.get(request_ctx, url, payload=payload, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_children(self):\n\n pass", "def child_ents(self) -> Iterator['Entity']:\n for ent in self.vmf.entities:\n if self.id in ent.visgroup_ids:\n yield ent", "def get_children(self):\r\n return self.children", "def children(self): # noqa: ANN201", "def get_...
[ "0.6219274", "0.62047005", "0.6197958", "0.6164997", "0.6146464", "0.6123517", "0.6123517", "0.6123517", "0.6090403", "0.60803944", "0.6052547", "0.6046702", "0.6037661", "0.60363", "0.60363", "0.6025189", "0.59819186", "0.59583235", "0.5945395", "0.59407395", "0.593865", "...
0.0
-1
List the immediate OutcomeGroup children of the outcome group. Paginated.
def list_subgroups_accounts(request_ctx, account_id, id, per_page=None, **request_kwargs):
    """GET the immediate subgroups of an account-level outcome group (paginated).

    Falls back to ``request_ctx.per_page`` when ``per_page`` is not given.
    """
    path = '/v1/accounts/{account_id}/outcome_groups/{id}/subgroups'
    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)
    payload = {
        'per_page': request_ctx.per_page if per_page is None else per_page,
    }
    return client.get(request_ctx, url, payload=payload, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_children(self):\n\n pass", "def child_ents(self) -> Iterator['Entity']:\n for ent in self.vmf.entities:\n if self.id in ent.visgroup_ids:\n yield ent", "def get_children(self):\r\n return self.children", "def children(self): # noqa: ANN201", "def get_...
[ "0.6219274", "0.62047005", "0.6197958", "0.6164997", "0.6146464", "0.6123517", "0.6123517", "0.6123517", "0.6090403", "0.60803944", "0.6052547", "0.6046702", "0.6037661", "0.60363", "0.60363", "0.6025189", "0.59819186", "0.59583235", "0.5945395", "0.59407395", "0.593865", "...
0.0
-1
List the immediate OutcomeGroup children of the outcome group. Paginated.
def list_subgroups_courses(request_ctx, course_id, id, per_page=None, **request_kwargs):
    """GET the immediate subgroups of a course-level outcome group (paginated).

    Falls back to ``request_ctx.per_page`` when ``per_page`` is not given.
    """
    path = '/v1/courses/{course_id}/outcome_groups/{id}/subgroups'
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
    payload = {
        'per_page': request_ctx.per_page if per_page is None else per_page,
    }
    return client.get(request_ctx, url, payload=payload, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_children(self):\n\n pass", "def child_ents(self) -> Iterator['Entity']:\n for ent in self.vmf.entities:\n if self.id in ent.visgroup_ids:\n yield ent", "def get_children(self):\r\n return self.children", "def children(self): # noqa: ANN201", "def get_...
[ "0.6219274", "0.62047005", "0.6197958", "0.6164997", "0.6146464", "0.6123517", "0.6123517", "0.6123517", "0.6090403", "0.60803944", "0.6052547", "0.6046702", "0.6037661", "0.60363", "0.60363", "0.6025189", "0.59819186", "0.59583235", "0.5945395", "0.59407395", "0.593865", "...
0.0
-1
Creates a new empty subgroup under the outcome group with the given title and description.
def create_subgroup_global(request_ctx, id, title, description=None, vendor_guid=None, **request_kwargs):
    """POST a new empty subgroup under the global outcome group."""
    path = '/v1/global/outcome_groups/{id}/subgroups'
    url = request_ctx.base_api_url + path.format(id=id)
    payload = {
        'title': title,
        'description': description,
        'vendor_guid': vendor_guid,
    }
    return client.post(request_ctx, url, payload=payload, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def with_group(title: str) -> Generator[None, None, None]:\n if os.environ.get(\"GITHUB_ACTIONS\", \"false\") != \"true\":\n console.print(\"#\" * 10 + \" [bright_blue]\" + title + \"[/] \" + \"#\" * 10)\n yield\n ...
[ "0.5995512", "0.57896537", "0.5699493", "0.55475307", "0.55475307", "0.55366933", "0.55120105", "0.5418014", "0.5416043", "0.53805554", "0.53521186", "0.53274035", "0.5322022", "0.53077036", "0.529829", "0.5248793", "0.5231281", "0.5218727", "0.5214925", "0.5195291", "0.51951...
0.6064545
0
Creates a new empty subgroup under the outcome group with the given title and description.
def create_subgroup_accounts(request_ctx, account_id, id, title, description=None, vendor_guid=None, **request_kwargs):
    """POST a new empty subgroup under an account-level outcome group."""
    path = '/v1/accounts/{account_id}/outcome_groups/{id}/subgroups'
    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)
    payload = {
        'title': title,
        'description': description,
        'vendor_guid': vendor_guid,
    }
    return client.post(request_ctx, url, payload=payload, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_subgroup_global(request_ctx, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.bas...
[ "0.6064545", "0.5995512", "0.57896537", "0.5699493", "0.55475307", "0.55475307", "0.55366933", "0.55120105", "0.5418014", "0.5416043", "0.53805554", "0.53521186", "0.53274035", "0.5322022", "0.53077036", "0.529829", "0.5248793", "0.5231281", "0.5218727", "0.5214925", "0.51952...
0.5018026
52
Creates a new empty subgroup under the outcome group with the given title and description.
def create_subgroup_courses(request_ctx, course_id, id, title, description=None, vendor_guid=None, **request_kwargs):
    """POST a new empty subgroup under a course-level outcome group."""
    path = '/v1/courses/{course_id}/outcome_groups/{id}/subgroups'
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
    payload = {
        'title': title,
        'description': description,
        'vendor_guid': vendor_guid,
    }
    return client.post(request_ctx, url, payload=payload, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_subgroup_global(request_ctx, id, title, description=None, vendor_guid=None, **request_kwargs):\n\n path = '/v1/global/outcome_groups/{id}/subgroups'\n payload = {\n 'title' : title,\n 'description' : description,\n 'vendor_guid' : vendor_guid,\n }\n url = request_ctx.bas...
[ "0.6064545", "0.5995512", "0.57896537", "0.5699493", "0.55475307", "0.55475307", "0.55366933", "0.55120105", "0.5418014", "0.5416043", "0.53521186", "0.53274035", "0.5322022", "0.53077036", "0.529829", "0.5248793", "0.5231281", "0.5218727", "0.5214925", "0.5195291", "0.519513...
0.53805554
10
Creates a new subgroup of the outcome group with the same title and description as the source group, then creates links in that new subgroup to the same outcomes that are linked in the source group. Recurses on the subgroups of the source group, importing them each in turn into the new subgroup. Allows you to copy organizational structure, but does not create copies of the outcomes themselves, only new links. The source group must be either global, from the same context as this outcome group, or from an associated account. The source group cannot be the root outcome group of its context.
def import_outcome_group_global(request_ctx, id, source_outcome_group_id, **request_kwargs):
    """POST an import of ``source_outcome_group_id`` into the global outcome group."""
    path = '/v1/global/outcome_groups/{id}/import'
    url = request_ctx.base_api_url + path.format(id=id)
    payload = {
        'source_outcome_group_id': source_outcome_group_id,
    }
    return client.post(request_ctx, url, payload=payload, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_outcome_group_courses(request_ctx, course_id, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_...
[ "0.5898711", "0.5874123", "0.5813369", "0.57389224", "0.56055593", "0.55976254", "0.5595738", "0.55490917", "0.5544522", "0.5541461", "0.5518073", "0.53831655", "0.53829265", "0.53379554", "0.531845", "0.5302578", "0.52945095", "0.52933514", "0.5258613", "0.5246323", "0.52400...
0.5706337
4
Creates a new subgroup of the outcome group with the same title and description as the source group, then creates links in that new subgroup to the same outcomes that are linked in the source group. Recurses on the subgroups of the source group, importing them each in turn into the new subgroup. Allows you to copy organizational structure, but does not create copies of the outcomes themselves, only new links. The source group must be either global, from the same context as this outcome group, or from an associated account. The source group cannot be the root outcome group of its context.
def import_outcome_group_accounts(request_ctx, account_id, id, source_outcome_group_id, **request_kwargs):
    """POST an import of ``source_outcome_group_id`` into an account-level outcome group."""
    path = '/v1/accounts/{account_id}/outcome_groups/{id}/import'
    url = request_ctx.base_api_url + path.format(account_id=account_id, id=id)
    payload = {
        'source_outcome_group_id': source_outcome_group_id,
    }
    return client.post(request_ctx, url, payload=payload, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_outcome_group_courses(request_ctx, course_id, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/courses/{course_id}/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(course_...
[ "0.5898711", "0.5813369", "0.57389224", "0.5706337", "0.56055593", "0.55976254", "0.5595738", "0.55490917", "0.5544522", "0.5541461", "0.5518073", "0.53831655", "0.53829265", "0.53379554", "0.531845", "0.5302578", "0.52945095", "0.52933514", "0.5258613", "0.5246323", "0.52400...
0.5874123
1
Creates a new subgroup of the outcome group with the same title and description as the source group, then creates links in that new subgroup to the same outcomes that are linked in the source group. Recurses on the subgroups of the source group, importing them each in turn into the new subgroup. Allows you to copy organizational structure, but does not create copies of the outcomes themselves, only new links. The source group must be either global, from the same context as this outcome group, or from an associated account. The source group cannot be the root outcome group of its context.
def import_outcome_group_courses(request_ctx, course_id, id, source_outcome_group_id, **request_kwargs):
    """POST an import of ``source_outcome_group_id`` into a course-level outcome group."""
    path = '/v1/courses/{course_id}/outcome_groups/{id}/import'
    url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
    payload = {
        'source_outcome_group_id': source_outcome_group_id,
    }
    return client.post(request_ctx, url, payload=payload, **request_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_outcome_group_accounts(request_ctx, account_id, id, source_outcome_group_id, **request_kwargs):\n\n path = '/v1/accounts/{account_id}/outcome_groups/{id}/import'\n payload = {\n 'source_outcome_group_id' : source_outcome_group_id,\n }\n url = request_ctx.base_api_url + path.format(acc...
[ "0.5874123", "0.5813369", "0.57389224", "0.5706337", "0.56055593", "0.55976254", "0.5595738", "0.55490917", "0.5544522", "0.5541461", "0.5518073", "0.53831655", "0.53829265", "0.53379554", "0.531845", "0.5302578", "0.52945095", "0.52933514", "0.5258613", "0.5246323", "0.52400...
0.5898711
0
Parse challenge from a challenge response, cache it, and return it.
def _update_challenge(request: PipelineRequest, challenger: "PipelineResponse") -> HttpChallenge:
    """Parse the WWW-Authenticate challenge from ``challenger``, cache it by URL, and return it."""
    request_url = request.http_request.url
    response_headers = challenger.http_response.headers
    challenge = HttpChallenge(
        request_url,
        response_headers.get("WWW-Authenticate"),
        response_headers=response_headers,
    )
    ChallengeCache.set_challenge_for_url(request_url, challenge)
    return challenge
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_challenge(cls, response):\n links = _parse_header_links(response)\n try:\n authzr_uri = links['up']['url']\n except KeyError:\n raise errors.ClientError('\"up\" link missing')\n return (\n response.json()\n .addCallback(\n ...
[ "0.69334584", "0.57358587", "0.56640327", "0.56572354", "0.5530632", "0.54902357", "0.54403126", "0.54084736", "0.54046005", "0.54018176", "0.5199438", "0.5182611", "0.517387", "0.5165005", "0.512281", "0.5000999", "0.49986807", "0.4956543", "0.4911077", "0.49107736", "0.4908...
0.6099478
1
check if the reference folder is in place and all attributes are ready
def check_reference_ready():
    """Verify the reference folder has a manifest and genome FASTA; mark simulation ready.

    Side effects: logs progress via ``_log`` and, on success, sets
    ``settings.REFERENCE_READY`` and ``settings.INPUT_FILES``.
    """
    # The manifest describes which reference files the simulation needs.
    manifest_file = os.path.join(settings.DEFAULT_REFERENCE_PATH, 'manifest.json')
    if not os.path.isfile(manifest_file):
        _log("manifest.json file cannot be found in the reference folder; simulation will NOT work!")
        return

    _log("reading manifest.json ..")
    with open(manifest_file, 'r') as manifest:
        manifest_data = json.load(manifest)

    # The genome FASTA named by the manifest must also exist on disk.
    reference_fasta = os.path.join(settings.DEFAULT_REFERENCE_PATH, manifest_data["reference"])
    if not os.path.isfile(reference_fasta):
        _log("genome reference file (.fasta | .fa) cannot be found in the reference folder; simulation will NOT work!")
        return

    _log("found all required simulation files in place; simulation is READY!")
    settings.REFERENCE_READY = True
    settings.INPUT_FILES = {"reference": manifest_data['reference'], "targets": 'dummy'}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'processed/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'processed/test.pkl')))", "def copy_file_check(self):\n pass", "def _before_reference_check(self, maya_file, client_data=None...
[ "0.6192987", "0.60495603", "0.5934274", "0.59128857", "0.58630824", "0.5849084", "0.5817839", "0.5815572", "0.5815572", "0.57971984", "0.57678586", "0.5761644", "0.57452965", "0.5725083", "0.5716483", "0.5684965", "0.5675776", "0.5654527", "0.5647706", "0.5647706", "0.5647706...
0.7016824
0
Callback invoked whenever the system state has changed. Checks whether the step has to be advanced
def updateState(self):
    """State-change callback: mark completed operations and advance the step when all are done.

    User activity is read from ``self.cut_state``; completion flags live in
    ``self.step_ops`` (operation name -> bool).
    """
    if 'cutting' in self.step_ops and self.cut_state.user_cutting:
        self.step_ops['cutting'] = True
    if 'cooking' in self.step_ops and self.cut_state.user_cooking:
        self.step_ops['cooking'] = True
    # TODO: add the rest of the operations

    # Advance only when no tracked operation is still pending.
    advance = all(done != False for done in self.step_ops.values())
    if advance:
        self.nextStep()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _voltage_changed(self):\n if self.checkValueBool:\n self.check_status()", "def has_state_changed(self) -> bool:\r\n ...", "def _on_step(self) -> bool:\n # print(\"locals \", self.locals)\n # # what timestep you think\n # print(\"timestep \",CustomCallback.step)...
[ "0.6339768", "0.6194187", "0.6136971", "0.5964047", "0.5904102", "0.5903155", "0.5893415", "0.5889973", "0.58527935", "0.58454347", "0.5844172", "0.58329093", "0.57805914", "0.57780147", "0.5772509", "0.5751888", "0.573898", "0.57344913", "0.5711796", "0.5696087", "0.56890595...
0.6468291
0
Roll a 6-sided die.
def roll_dice():
    """Roll a six-sided die and return the face as a string digit '1'-'6'."""
    faces = ['1', '2', '3', '4', '5', '6']
    return random.choice(faces)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def roll_dice():\n return (random.randint(1, 6) + random.randint(1, 6))", "def roll_dice(self):\n self.roll = (random.randint(1,6), random.randint(1,6))\n return self.roll", "def roll_dice():\n roll = random.randint(1, 6)\n return roll", "def roll_dice(num_rolls, dice=six_sided):\n ...
[ "0.7968151", "0.7608066", "0.7531591", "0.7409087", "0.73609334", "0.72943085", "0.7153998", "0.7130241", "0.71107805", "0.7101462", "0.70783186", "0.7048907", "0.7019933", "0.70183617", "0.70042896", "0.6980534", "0.695242", "0.69513273", "0.69413614", "0.69020563", "0.68830...
0.6365586
60
Constructor for thread that will request the RSS of a particular podcast series, parse the series details and episode information, and save the information w/`storer`
def __init__(self, storer, series, i):
    """Set up a worker that fetches and parses podcast RSS feeds.

    ``storer`` persists parsed results, ``series`` is the full list of
    podcast series, and ``i`` is this worker's starting index.
    """
    super(EpisodeWorker, self).__init__()
    self.i = i
    self.series = series  # All series
    self.storer = storer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n while self.i < len(self.series):\n # Grab line + RSS\n s = self.series[self.i]\n rss = self.request_rss(s.feedUrl)\n\n # Compose Episodes\n ep_dicts = []\n for entry in rss['entries']:\n ep_dicts.append(Episode(s, entry).__dict__)\n\n # Build result dic...
[ "0.666735", "0.582353", "0.5599041", "0.5515614", "0.5502834", "0.54765725", "0.54175603", "0.5364501", "0.533091", "0.53269607", "0.5278374", "0.5247649", "0.5233488", "0.5222864", "0.5213202", "0.5204529", "0.51684767", "0.516591", "0.51639456", "0.5141017", "0.513316", "...
0.6067558
1
Uses information in `line` to request and return the RSS feed
def request_rss(self, url):
    """Fetch and parse the RSS feed at ``url`` via feedparser; return the parsed feed."""
    parsed_feed = feedparser.parse(url)
    return parsed_feed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_rss(url):", "def get_news(url):\r\n \r\n # parse RSS feed into list of dictionaries\r\n feed = feedparser.parse(url)\r\n\r\n # no RSS feed articles for url\r\n if len(feed['entries']) == 0:\r\n return []\r\n \r\n # get first ten articles from the RSS feed\r\n news = []\r\n ...
[ "0.665676", "0.63081646", "0.6112597", "0.60895586", "0.60594904", "0.60477144", "0.60260314", "0.598809", "0.5984063", "0.59758997", "0.5936038", "0.5913608", "0.58760685", "0.5829383", "0.58091927", "0.5775324", "0.5773286", "0.5708184", "0.56792194", "0.56642944", "0.56638...
0.696534
0