Columns: function (string, lengths 11–56k), repo_name (string, lengths 5–60), features (list)
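The three columns repeat as row triplets below: a flattened function body, the repository it was mined from, and a list of numeric metadata. A minimal sketch of reading such rows, assuming a JSON-lines export with these column names (the file name rows.jsonl is illustrative, not from the source):

import json

# Each line is assumed to be one row with the three columns above.
with open("rows.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        code = row["function"]    # flattened source of one function
        repo = row["repo_name"]   # owner/name of the originating repo
        feats = row["features"]   # list of numeric metadata for the row
        print(repo, len(code), feats)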
def revert_image(self):
    log.info('download-revert', name=self.name)
    if self._download_state == ImageDownload.DOWNLOAD_SUCCEEDED:
        pass  # TODO: Implement
    self._image_state = ImageDownload.IMAGE_INACTIVE
    returnValue('TODO: Implement this')
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def monitor_state_to_activate_state(self, state):
    if ':' in state:
        state = state.split(':')[-1]
    result = {
        'enabling-software': ImageDownload.IMAGE_ACTIVATE,  # currently enabling the software
        'software-enabled': ImageDownload.IMAGE_ACTIVE,  # success...
opencord/voltha
[ 73, 117, 73, 17, 1484694318 ]
def execute(self):
    msg = self.xpybuild(shouldFail=False, args=[], buildfile='root.xpybuild.py')
    self.startProcess(self.output+'/build-output/test', [],
                      stdout='test.out', stderr='test.err')
xpybuild/xpybuild
[ 7, 4, 7, 5, 1486396682 ]
def LOG_CRITICAL(msg, *args, **kwargs):  # pragma: no cover
    logger.critical(msg, *args, **kwargs)
    logging.shutdown()
    sys.exit(1)
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def LOG_INFO(msg, *args, **kwargs):  # pragma: no cover
    logger.info(msg, *args, **kwargs)
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def decorate(func):
    _STATIC_VARS.append((func, varname, value))
    setattr(func, varname, copy(value))
    return func
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def reset_static_vars():
    for (func, varname, value) in _STATIC_VARS:
        setattr(func, varname, copy(value))
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
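The decorate closure and reset_static_vars above imply an enclosing decorator factory that attaches a resettable "static" attribute to a function and records it for later reset. The factory name static_var below is an assumption for illustration; only _STATIC_VARS, decorate, and reset_static_vars appear in the source.

from copy import copy

_STATIC_VARS = []

def static_var(varname, value):  # assumed name for the enclosing factory
    def decorate(func):
        _STATIC_VARS.append((func, varname, value))
        setattr(func, varname, copy(value))
        return func
    return decorate

@static_var("ids", {})
def demo(key):
    # each decorated function carries its own dict, restored to the original
    # value by reset_static_vars()
    return demo.ids.setdefault(key, len(demo.ids) + 1)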
def find_idx(name):
    for idx, field in enumerate(fields):
        if name == field:
            return idx
    return -1
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def add_pragmas(json_item, p4_object):
    json_item["pragmas"] = list(p4_object._pragmas)
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def get_padding_name(i):
    return "_padding_{}".format(i)
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def dump_headers(json_dict, hlir, keep_pragmas=False):
    headers = []
    id_ = 0
    for name, p4_header_instance in hlir.p4_header_instances.items():
        if p4_header_instance.virtual:
            continue
        header_instance_dict = OrderedDict()
        header_instance_dict["name"] = name
        header_in...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def __init__(self, name, size, header_type):
    self.name = name
    self.size = size
    self.header_type = header_type
    self.ids = []
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def field_suffix(p4_field):
    suffix = p4_field.name
    if suffix == "valid":
        suffix = "$valid$"
    return suffix
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def header_type_field_offset(p4_header_type, fname):
    for idx, f in enumerate(p4_header_type.layout):
        if f == fname:
            return idx
    LOG_CRITICAL("No field {} in header type {}".format(  # pragma: no cover
        fname, p4_header_type.name))
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def make_expression(op, L, R):
    e = OrderedDict(
        [("type", "expression"),
         ("value", OrderedDict(
             [("op", op), ("left", L), ("right", R)]))])
    return e
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
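Since make_expression returns a plain OrderedDict, either operand may itself be an expression, which is how nested arithmetic such as (a + b) << c would be encoded. The field-operand dicts below are placeholders for illustration, not necessarily the exact operand encoding the compiler uses.

from collections import OrderedDict

a = OrderedDict([("type", "field"), ("value", ["hdr", "a"])])  # placeholder operand
b = OrderedDict([("type", "field"), ("value", ["hdr", "b"])])
c = OrderedDict([("type", "field"), ("value", ["hdr", "c"])])
nested = make_expression("<<", make_expression("+", a, b), c)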
def format_hexstr(i):
    # Python 2 appends an "L" at the end of a long number representation,
    # which we need to remove
    return hex(i).rstrip("L")
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def is_register_ref(obj):
    try:
        return (type(obj) is p4.p4_register_ref)
    except AttributeError:
        return False
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def build_match_value(widths, value):
    res = ""
    for width in reversed(widths):
        mask = (1 << width) - 1
        val = value & mask
        num_bytes = (width + 7) // 8  # integer division: bytes needed for width bits
        res = "{0:0{1}x}".format(val, 2 * num_bytes) + res
        value = value >> width
    return "0x" + res
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
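A worked example of the byte packing: each width is padded to whole bytes, and because the widths are traversed in reverse, the last width in the list consumes the least-significant bits of value first.

# widths [4, 12], value 0xABC:
#   12-bit chunk -> 0xABC & 0xFFF = 0xabc, padded to 2 bytes -> "0abc"
#    4-bit chunk -> remaining value 0,    padded to 1 byte  -> "00"
assert build_match_value([4, 12], 0xABC) == "0x000abc"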
def dump_one_parser(parser_name, parser_id, p4_start_state, keep_pragmas=False):
    parser_dict = OrderedDict()
    parser_dict["name"] = parser_name
    parser_dict["id"] = parser_id
    parser_dict["init_state"] = p4_start_state.name
    parse_states = []
    accessible_parse_states = set()
    accessible_parse_sta...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def dump_parsers(json_dict, hlir, keep_pragmas=False):
    parsers = []
    parser_id = 0
    for name, p4_parse_state in hlir.p4_parse_states.items():
        new_name = None
        if name == "start":
            new_name = "parser"
        elif "packet_entry" in p4_parse_state._pragmas:
            new_name = name...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def process_forced_header_ordering(hlir, ordering):
    p4_ordering = []
    for hdr_name in ordering:
        if hdr_name in hlir.p4_header_instances:
            p4_ordering.append(hlir.p4_header_instances[hdr_name])
        elif hdr_name + "[0]" in hlir.p4_header_instances:
            hdr_0 = hlir.p4_header_instanc...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def __init__(self, parse_state, prev_hdr_node, tag_stacks_index):
    self.current_state = parse_state
    self.prev_hdr_node = prev_hdr_node
    self.stacks = frozenset(tag_stacks_index.items())
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def __hash__(self):
    return hash((self.current_state, self.prev_hdr_node, self.stacks))
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def __str__(self):  # pragma: no cover
    return "{}, {}, {}".format(
        self.current_state, self.prev_hdr_node, self.stacks)
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def walk_rec(hlir, parse_state, prev_hdr_node, tag_stacks_index,
             visited, recursion_states):
    assert(isinstance(parse_state, p4.p4_parse_state))
    rec_state = State(parse_state, prev_hdr_node, tag_stacks_index)
    if rec_state in recursion_states:
        return
    recursion_...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def dump_one_deparser(deparser_name, deparser_id, p4_start_state, hlir):
    deparser_dict = OrderedDict()
    deparser_dict["name"] = deparser_name
    deparser_dict["id"] = deparser_id
    deparser_id = deparser_id
    header_topo_sorting = produce_parser_topo_sorting(hlir, p4_start_state)
    deparser_order = [hdr....
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def dump_deparsers(json_dict, hlir, p4_v1_1=False):
    deparsers = []
    deparser_id = 0
    for name, p4_parse_state in hlir.p4_parse_states.items():
        new_name = None
        if name == "start":
            new_name = "deparser"
        elif "packet_entry" in p4_parse_state._pragmas:
            new_name = n...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def get_nodes(pipe_ptr, node_set):
    if pipe_ptr is None:
        return
    if pipe_ptr in node_set:
        return
    node_set.add(pipe_ptr)
    for next_node in pipe_ptr.next_.values():
        get_nodes(next_node, node_set)
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def get_table_match_type(p4_table):
    match_types = []
    for _, m_type, _ in p4_table.match_fields:
        match_types.append(match_type_to_str(m_type))
    if len(match_types) == 0:
        match_type = "exact"
    elif "range" in match_types:
        match_type = "range"
    elif "ternary" in match_types:
        ...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def dump_action_profile(pipe_name, action_profiles, p4_action_profile,
                        keep_pragmas=False):
    # check that the same action profile is not referenced across multiple
    # control flows. This is somewhat of an artificial restriction imposed by
    # the pipeline abstraction in the JSON
    if p4...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def dump_one_pipeline(json_dict, pipe_name, pipe_ptr, hlir, keep_pragmas=False):
    def get_table_name(p4_table):
        if not p4_table:
            return None
        return p4_table.name

    def table_has_counters(p4_table):
        for name, counter in hlir.p4_counters.items():
            if counter.binding ==...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def index_OrderedDict(self, kf):
    idx = 0
    for k, v in self.items():
        if k == kf:
            return idx
        idx += 1
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def field_list_to_learn_id(p4_field_list):
    ids = field_list_to_learn_id.ids
    if p4_field_list in ids:
        return ids[p4_field_list]
    idx = len(ids) + 1
    ids[p4_field_list] = idx
    return idx
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def field_list_to_id(p4_field_list):
    ids = field_list_to_id.ids
    if p4_field_list in ids:
        return ids[p4_field_list]
    idx = len(ids) + 1
    ids[p4_field_list] = idx
    return idx
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
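Both field_list_to_learn_id and field_list_to_id read a memo dict off the function object itself, so that attribute must exist before the first call; presumably it is attached by the static-var decorator pattern shown earlier, but plain attribute assignment behaves identically.

# One-time initialization (assumed to be done by the decorator in the source):
field_list_to_learn_id.ids = {}
field_list_to_id.ids = {}
# IDs are then handed out starting at 1, one per distinct field list.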
def dump_actions(json_dict, hlir, p4_v1_1=False, keep_pragmas=False):
    actions = []
    action_id = 0
    table_actions_set = get_p4_action_set(hlir)
    for action in table_actions_set:
        action_dict = OrderedDict()
        action_dict["name"] = action.name
        action_dict["id"] = action_id
        acti...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def dump_checksums(json_dict, hlir):
    checksums = []
    id_ = 0
    for name, p4_header_instance in hlir.p4_header_instances.items():
        for field_instance in p4_header_instance.fields:
            field_ref = format_field_ref(field_instance)
            field_name = '.'.join(field_ref)
            for calcula...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def dump_learn_lists(json_dict, hlir):
    learn_lists = []
    learn_list_ids = field_list_to_learn_id.ids
    for p4_field_list, id_ in learn_list_ids.items():
        learn_list_dict = OrderedDict()
        learn_list_dict["id"] = id_
        learn_list_dict["name"] = p4_field_list.name
        elements = []
        ...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def dump_meters(json_dict, hlir, keep_pragmas=False):
    meters = []
    id_ = 0
    for name, p4_meter in hlir.p4_meters.items():
        meter_dict = OrderedDict()
        meter_dict["name"] = name
        meter_dict["id"] = id_
        id_ += 1
        if p4_meter.binding and (p4_meter.binding[0] == p4.P4_DIRECT):
            ...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def dump_registers(json_dict, hlir, keep_pragmas=False):
    registers = []
    id_ = 0
    for name, p4_register in hlir.p4_registers.items():
        if p4_register.binding and (p4_register.binding[0] == p4.P4_DIRECT):
            LOG_CRITICAL("'{}' is a direct register; direct registers are not "
                         ...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def dump_force_arith(json_dict, hlir):
    force_arith = []
    headers = ["standard_metadata", "intrinsic_metadata"]
    for header_name in headers:
        if header_name not in hlir.p4_header_instances:
            continue
        p4_header_instance = hlir.p4_header_instances[header_name]
        p4_header_type =...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def dump_extern_instances(json_dict, hlir):
    extern_instances = []
    id_ = 0
    for name, p4_extern_instance in hlir.p4_extern_instances.items():
        extern_instance_dict = OrderedDict()
        extern_instance_dict["name"] = name
        extern_instance_dict["id"] = id_
        extern_instance_dict["type"] =...
p4lang/p4c-bm
[ 23, 30, 23, 3, 1438962502 ]
def setUp(self):
    super(AwsVpcS3EndpointTest, self).setUp()
    self.mock_vpc = mock.Mock()
    self.mock_vpc.region = REGION
    self.mock_run_cmd = self.enter_context(
        mock.patch.object(aws_vpc_endpoint.AwsVpcS3Endpoint, '_RunCommand'))
GoogleCloudPlatform/PerfKitBenchmarker
[ 1785, 474, 1785, 248, 1405617806 ]
def testEndPointIdNoVpc(self):
    # initialize with no VPC means no immediate lookups done
    endpoint = self._InitEndpoint(None)
    self.assertIsNone(endpoint.id)
    endpoint._RunCommand.assert_not_called()
GoogleCloudPlatform/PerfKitBenchmarker
[ 1785, 474, 1785, 248, 1405617806 ]
def testCreate(self):
    # shows that a call to .Create() will get the routing table info followed
    # by the create-vpc-endpoint call
    endpoint = self._InitEndpoint(VPC_ID)
    self.mock_run_cmd.reset_mock()
    self.mock_run_cmd.side_effect = [
        [],                # query for endpoint id
        [ROUTE_TABLE_ID],  # q...
GoogleCloudPlatform/PerfKitBenchmarker
[ 1785, 474, 1785, 248, 1405617806 ]
def install(self, env):
    self.install_packages(env)
arenadata/ambari
[ 3, 7, 3, 3, 1478181309 ]
def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    if params.version and compare_versions(format_stack_version(params.version), '4.1.0.0') >= 0:
        stack_select.select_packages(params.version)
        # This is extremely important since it should only be called if c...
arenadata/ambari
[ 3, 7, 3, 3, 1478181309 ]
def stop(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    ensure_base_directories()
    daemon_cmd = format('source {params.conf_dir}/kafka-env.sh; {params.kafka_bin} stop')
    Execute(daemon_cmd,
            user=params.kafka_user,
            )
    File(params.kafka_pid_file,
         action...
arenadata/ambari
[ 3, 7, 3, 3, 1478181309 ]
def verify_ids(doc_iter, es_host, index, doc_type=None, step=100000):
    '''verify how many docs from the input iterator/list overlap with existing docs.'''
    index = index
    doc_type = doc_type
    es = get_es(es_host)
    q = {'query': {'ids': {"values": []}}}
    total_cnt = 0
    found_cnt = 0
    out = []...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def wrapper(func):
    '''this wrapper allows passing index and doc_type from wrapped method.'''
    def outter_fn(*args, **kwargs):
        self = args[0]
        index = kwargs.pop('index', self._index)  # pylint: disable=protected-access
        doc_type = kwargs.pop('doc_type', self._doc_type)  # pylin...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def __init__(self, client, index_name):
    self.client = client  # synchronous
    self.index_name = index_name  # MUST exist
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def doc_type(self):
    if int(self.client.info()['version']['number'].split('.')[0]) < 7:
        mappings = self.client.indices.get_mapping(self.index_name)
        mappings = mappings[self.index_name]["mappings"]
        return next(iter(mappings.keys()))
    return None
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def __init__(self, index, doc_type='_doc', es_host='localhost:9200',
             step=500, step_size=10,  # elasticsearch.helpers.bulk
             number_of_shards=1, number_of_replicas=0,
             check_index=True, **kwargs):
    self.es_host = es_host
    self._es = get_es(es_host, **kwargs)...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_biothing(self, bid, only_source=False, **kwargs):
    rawdoc = self._es.get(index=self._index, id=bid, doc_type=self._doc_type, **kwargs)
    if not only_source:
        return rawdoc
    else:
        doc = rawdoc['_source']
        doc["_id"] = rawdoc["_id"]
        return doc
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def exists(self, bid):
    """return True/False if a biothing id exists or not."""
    try:
        doc = self.get_biothing(bid, stored_fields=None)
        return doc['found']
    except NotFoundError:
        return False
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def mexists(self, bid_list):
    q = {
        "query": {
            "ids": {
                "values": bid_list
            }
        }
    }
    res = self._es.search(index=self._index, doc_type=self._doc_type,
                          body=q, stored_fields=None, size=len(bid_list))
    # id_set = set([do...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def count(self, q=None, raw=False):
    try:
        _res = self._es.count(index=self._index, doc_type=self._doc_type, body=q)
        return _res if raw else _res['count']
    except NotFoundError:
        return None
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def count_src(self, src):
    if isinstance(src, str):
        src = [src]
    cnt_d = {}
    for _src in src:
        q = {
            "query": {
                "constant_score": {
                    "filter": {
                        "exists": {"field": _src}
                        ...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def create_index(self, mapping=None, extra_settings=None):
    if not self._es.indices.exists(index=self._index):
        body = {
            'settings': {
                'number_of_shards': self.number_of_shards,
                "number_of_replicas": self.number_of_replicas,
            }
            ...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def exists_index(self):
    return self._es.indices.exists(index=self._index)
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def index_bulk(self, docs, step=None, action='index'):
    self._populate_es_version()
    index_name = self._index
    doc_type = self._doc_type
    step = step or self.step

    def _get_bulk(doc):
        # keep original doc
        ndoc = copy.copy(doc)
        ndoc.update({
            ...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def delete_docs(self, ids, step=None):
    '''delete a list of docs in bulk.'''
    index_name = self._index
    doc_type = self._doc_type
    step = step or self.step

    def _get_bulk(_id):
        if self._host_major_ver >= 7:
            doc = {
                '_op_type': 'delete',...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def update(self, id, extra_doc, upsert=True):  # pylint: disable=redefined-builtin
    '''update an existing doc with extra_doc.
    allow to set upsert=True, to insert new docs.
    '''
    body = {'doc': extra_doc}
    if upsert:
        body['doc_as_upsert'] = True
    return s...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def _get_bulk(doc):
    if self._host_major_ver >= 7:
        doc = {
            '_op_type': 'update',
            "_index": index_name,
            "_id": doc['_id'],
            "doc": doc
        }
    else:
        doc = {
            ...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_mapping(self):
    """return the current index mapping"""
    if self._host_major_ver <= 6:
        m = self._es.indices.get_mapping(
            index=self._index,
            doc_type=self._doc_type,
        )
        return m[self._index]["mappings"]
    elif self._host_major_...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_mapping_meta(self):
    """return the current _meta field."""
    m = self.get_mapping()
    doc_type = self._doc_type
    if doc_type is None:
        # fetch doc_type from mapping
        assert len(m) == 1, "More than one doc_type found, not supported when self._doc_type " + \
            ...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def build_index(self, collection, verbose=True, query=None, bulk=True,
                update=False, allow_upsert=True):
    index_name = self._index
    # update some settings for bulk indexing
    body = {
        "index": {
            "refresh_interval": "-1",  # disable refresh temporarily
            ...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def rate_control(cnt, t):
    delay = 0
    if t > 90:
        delay = 30
    elif t > 60:
        delay = 10
    if delay:
        time.sleep(delay)
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def optimize(self, max_num_segments=1):
    '''optimize the default index.'''
    params = {
        "wait_for_merge": False,
        "max_num_segments": max_num_segments,
    }
    return self._es.indices.forcemerge(index=self._index, params=params)
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def doc_feeder_using_helper(self, step=None, verbose=True, query=None, scroll='10m', **kwargs):
    # verbose unimplemented
    step = step or self.step
    q = query if query else {'query': {'match_all': {}}}
    for rawdoc in helpers.scan(client=self._es, query=q, scroll=scroll,
                               index=self._index, ...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def doc_feeder(self, step=None, verbose=True, query=None, scroll='10m',
               only_source=True, **kwargs):
    step = step or self.step
    q = query if query else {'query': {'match_all': {}}}
    _q_cnt = self.count(q=q, raw=True)
    n = _q_cnt['count']
    n_shards = _q_cnt['_shards']['total']
    ...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_id_list(self, step=None, verbose=True):
    step = step or self.step
    cur = self.doc_feeder(step=step, _source=False, verbose=verbose)
    for doc in cur:
        yield doc['_id']
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_docs(self, ids, step=None, only_source=True, **mget_args):
    '''
    Return matching docs for given ids iterable, if not found return None.
    A generator is returned to the matched docs. If only_source is False,
    the entire document is returned, otherwise only the source is returned.
    '''...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def snapshot(self, repo, snapshot, mode=None, **params):
    body = {
        "indices": self._index,
        "include_global_state": False
        # there is no reason to include global state in our application
        # we want to separate the staging env from the production env
        # (glo...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_repository(self, repo_name):
    try:
        return self._es.snapshot.get_repository(repo_name)
    except NotFoundError:
        raise IndexerException("Repository '%s' doesn't exist" % repo_name)
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_snapshot_status(self, repo, snapshot):
    return self._es.snapshot.status(repo, snapshot)
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def generate_es_mapping(inspect_doc, init=True, level=0):
    """Generate an ES mapping according to "inspect_doc", which is
    produced by biothings.utils.inspect module"""
    map_tpl = {
        int: {"type": "integer"},
        bool: {"type": "boolean"},
        float: {"type": "float"},
        str: {"type": "key...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_hub_db_conn():
    return Database()
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_src_dump():
    db = Database()
    return db[db.CONFIG.DATA_SRC_DUMP_COLLECTION]
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_src_build():
    db = Database()
    return db[db.CONFIG.DATA_SRC_BUILD_COLLECTION]
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_data_plugin():
    db = Database()
    return db[db.CONFIG.DATA_PLUGIN_COLLECTION]
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_cmd():
    db = Database()
    return db[db.CONFIG.CMD_COLLECTION]
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_hub_config():
    db = Database()
    return db[getattr(db.CONFIG, "HUB_CONFIG_COLLECTION", "hub_config")]
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def get_last_command():
    cmds = list(sorted(get_cmd()._read().values(), key=lambda cmd: cmd["_id"]))
    return cmds[-1] if cmds else None
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def __init__(self):
    super(Database, self).__init__()
    self.name = self.CONFIG.DATA_HUB_DB_DATABASE
    self.host = self.CONFIG.HUB_DB_BACKEND["host"]
    self.client = Elasticsearch(self.host, serializer=_HubDBEncoder())
    if not self.client.indices.exists(index=self.name):
        sel...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def address(self):
    return self.host
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def _exists(self, _id):
    return self.client.exists(self.name, _id)
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def _write(self, _id, doc):
    assert doc.pop("_id", None) in (_id, None)
    self.client.index(self.name, doc, id=_id)
    self.client.indices.refresh(self.name)
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def create_collection(self, colname):
    return self[colname]
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def __init__(self, colname, db):
    self.name = colname
    self.db = db
    if not self.db._exists(colname):
        self._write({})
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def _read(self):
    return self.db._read(self.name)
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def _exists_one(self, _id):
    return str(_id) in self._read()
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def func(collection):
    collection[str(doc["_id"])] = doc
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def __getitem__(self, _id):
    return self.find_one({"_id": _id})
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def find(self, filter=None, projection=None, *args, **kwargs):
    if args or kwargs:
        raise NotImplementedError()
    results = []
    logger = logging.getLogger(__name__)
    for doc in self._read().values():
        _doc = dict(traverse(doc))  # dotdict
        _doc.update(dict(t...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def replace_one(self, filter, replacement, upsert=False, *args, **kwargs):
    if args or kwargs:
        raise NotImplementedError()
    doc = self.find_one(filter) or {}
    if not (doc or upsert):
        raise ValueError("No Match.")
    _id = doc.get("_id") or filter["_id"]
    repla...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def _update_one(self, doc, update, *args, **kwargs):
    if args or kwargs:
        raise NotImplementedError()
    if not len(update) == 1:
        raise ValueError("Invalid operator.")
    if next(iter(update)) not in ("$set", "$unset", "$push", "$addToSet", "$pull"):
        raise NotImple...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def update_many(self, filter, update, upsert=False, *args, **kwargs):
    docs = self.find(filter)
    if not docs and upsert:
        if any("." in k for k in filter):
            raise ValueError("dotfield in upsert.")
        docs = [filter]
    for doc in docs:
        self._update_one...
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def update(self, *args, **kwargs):
    # In the future,
    # Use replace_one(), update_one(), or update_many() instead.
    self.update_many(*args, **kwargs)
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def remove(self, query):
    # In the future,
    # Use delete_one() or delete_many() instead.
    docs = self.find(query)
    collection = self._read()
    for doc in docs:
        del collection[doc["_id"]]
    self._write(collection)
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]
def _pyobj(doc):  # ES doc -> Python object
    for _, _doc in traverse(doc):
        if isinstance(_doc, dict):
            for k, v in list(_doc.items()):
                _doc[k] = _eval(v)
        elif isinstance(_doc, list):
            _doc[:] = map(_eval, _doc)
    return doc
biothings/biothings.api
[ 39, 25, 39, 80, 1452637246 ]